VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 102572

Last change on this file since 102572 was 102435, checked in by vboxsync, 12 months ago

VMM/IEM: doxygen fix. bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 444.7 KB
Line 
1/* $Id: IEMAll.cpp 102435 2023-12-02 11:38:39Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The "IEM_MEM" log group covers most of the memory-related detail logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
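/*
 * Illustrative example (not part of the original source) of how the level
 * assignments above map onto the logging macros from <VBox/log.h>; the format
 * strings below are made up:
 *
 *     Log(("iemRaiseXcptOrInt: #GP(0)\n"));                       - level 1, major events
 *     LogFlow(("IEMExecOne: rip=%RX64\n", ...));                  - flow, enter/exit info
 *     Log4(("decode - %04x:%08RX64 mov eax, ebx\n", ...));        - level 4, mnemonics w/ EIP
 *     Log10(("IEMTlbInvalidateAll\n"));                           - level 10, TLB events
 */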
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
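/*
 * Illustrative example (not part of the original source): with a guest DR7 of
 * 0x00010401 - L0 set, R/W0 = 01b (data write) and the always-one bit 10 -
 * the PROCESS_ONE_BP expansion above hits the X86_DR7_RW_WO case for
 * breakpoint 0 and the function returns IEM_F_PENDING_BRK_DATA.  The
 * hypervisor DR7 from DBGFBpGetDR7() is folded in the same way, so guest and
 * DBGF breakpoints both end up as IEM_F_PENDING_BRK_XXX bits in fExec.
 */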
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
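/*
 * Illustrative example (not part of the original source) of the instruction
 * buffer reuse math above, using made-up numbers: with uInstrBufPc = 0x1000,
 * cbInstrBufTotal = 0x1000 (a whole page mapped) and a new RIP of 0x1234, the
 * offset works out to off = 0x234 < cbInstrBufTotal, so the buffer is kept:
 * offInstrNextByte = offCurInstrStart = 0x234 and cbInstrBuf becomes
 * min(0x234 + 15, 0x1000) = 0x243, letting decoding continue without a fresh
 * TLB/physical lookup.  A new RIP of 0x2400 would give off = 0x1400, which is
 * out of range, so pbInstrBuf would be dropped and refetched lazily instead.
 */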
455
456
457
458/**
459 * Prefetches opcodes the first time, when starting execution.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
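/*
 * Illustrative example (not part of the original source) of the read size
 * calculation above, using made-up numbers: for 16-bit or 32-bit code with
 * cs.u32Limit = 0xffff and eip = 0xfffd, cbToTryRead starts out as
 * 0xffff - 0xfffd + 1 = 3 bytes; it is then clamped to what is left on the
 * guest page and to sizeof(abOpcode).  Only when eip = 0 and the limit is
 * UINT32_MAX does the subtraction overflow to zero, which is the special case
 * handled by setting cbToTryRead = UINT32_MAX above.
 */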
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
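/*
 * Illustrative note (not part of the original source): TLB entries are tagged
 * with the page tag ORed together with the TLB revision (see the lookups in
 * IEMTlbInvalidatePage and iemOpcodeFetchBytesJmp, which test
 * pTlbe->uTag == (IEMTLB_CALC_TAG_NO_REV(GCPtr) | uTlbRevision)).  Bumping
 * uTlbRevision above therefore invalidates every entry lazily; only when the
 * revision counter wraps to zero do the tags need to be scrubbed explicitly,
 * which is the unlikely branch handled above.
 */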
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate.
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates the physical aspects of both TLBs the slow way following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller == pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
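/*
 * Illustrative note (not part of the original source): ASMAtomicCmpXchgU64 is
 * used above instead of a plain store because the other EMTs own their TLBs
 * and may be updating uTlbPhysRev themselves; the bump only lands if the
 * remote value is still uTlbPhysRevPrev, otherwise that CPU has already moved
 * on (or was sent through the slow path) and the stale update is dropped.
 */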
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.pbInstrBuf = NULL;
819 pVCpu->iem.s.cbInstrBufTotal = 0;
820 RT_NOREF(cbInstr);
821#else
822 RT_NOREF(pVCpu, cbInstr);
823#endif
824}
825
826
827
828#ifdef IEM_WITH_CODE_TLB
829
830/**
831 * Tries to fetch @a cbDst opcode bytes; raises the appropriate exception and
832 * longjmps on failure.
833 *
834 * We end up here for a number of reasons:
835 * - pbInstrBuf isn't yet initialized.
836 * - Advancing beyond the buffer boundary (e.g. cross page).
837 * - Advancing beyond the CS segment limit.
838 * - Fetching from non-mappable page (e.g. MMIO).
839 *
840 * @param pVCpu The cross context virtual CPU structure of the
841 * calling thread.
842 * @param pvDst Where to return the bytes.
843 * @param cbDst Number of bytes to read. A value of zero is
844 * allowed for initializing pbInstrBuf (the
845 * recompiler does this). In this case it is best
846 * to set pbInstrBuf to NULL prior to the call.
847 */
848void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
849{
850# ifdef IN_RING3
851 for (;;)
852 {
853 Assert(cbDst <= 8);
854 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
855
856 /*
857 * We might have a partial buffer match, deal with that first to make the
858 * rest simpler. This is the first part of the cross page/buffer case.
859 */
860 if (pVCpu->iem.s.pbInstrBuf != NULL)
861 {
862 if (offBuf < pVCpu->iem.s.cbInstrBuf)
863 {
864 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
865 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
866 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
867
868 cbDst -= cbCopy;
869 pvDst = (uint8_t *)pvDst + cbCopy;
870 offBuf += cbCopy;
871 pVCpu->iem.s.offInstrNextByte += offBuf;
872 }
873 }
874
875 /*
876 * Check segment limit, figuring how much we're allowed to access at this point.
877 *
878 * We will fault immediately if RIP is past the segment limit / in non-canonical
879 * territory. If we do continue, there are one or more bytes to read before we
880 * end up in trouble and we need to do that first before faulting.
881 */
882 RTGCPTR GCPtrFirst;
883 uint32_t cbMaxRead;
884 if (IEM_IS_64BIT_CODE(pVCpu))
885 {
886 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
887 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
888 { /* likely */ }
889 else
890 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
891 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
892 }
893 else
894 {
895 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
896 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
897 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
898 { /* likely */ }
899 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
900 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
901 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
902 if (cbMaxRead != 0)
903 { /* likely */ }
904 else
905 {
906 /* Overflowed because address is 0 and limit is max. */
907 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
908 cbMaxRead = X86_PAGE_SIZE;
909 }
910 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
911 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
912 if (cbMaxRead2 < cbMaxRead)
913 cbMaxRead = cbMaxRead2;
914 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
915 }
916
917 /*
918 * Get the TLB entry for this piece of code.
919 */
920 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
921 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
922 if (pTlbe->uTag == uTag)
923 {
924 /* likely when executing lots of code, otherwise unlikely */
925# ifdef VBOX_WITH_STATISTICS
926 pVCpu->iem.s.CodeTlb.cTlbHits++;
927# endif
928 }
929 else
930 {
931 pVCpu->iem.s.CodeTlb.cTlbMisses++;
932 PGMPTWALK Walk;
933 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
934 if (RT_FAILURE(rc))
935 {
936#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
937 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
938 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
939#endif
940 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
941 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
942 }
943
944 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
945 Assert(Walk.fSucceeded);
946 pTlbe->uTag = uTag;
947 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
948 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
949 pTlbe->GCPhys = Walk.GCPhys;
950 pTlbe->pbMappingR3 = NULL;
951 }
952
953 /*
954 * Check TLB page table level access flags.
955 */
956 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
957 {
958 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
959 {
960 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
961 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
962 }
963 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
964 {
965 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
966 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
967 }
968 }
969
970 /*
971 * Look up the physical page info if necessary.
972 */
973 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
974 { /* not necessary */ }
975 else
976 {
977 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
978 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
979 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
980 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
981 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
982 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
983 { /* likely */ }
984 else
985 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
986 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
987 | IEMTLBE_F_NO_MAPPINGR3
988 | IEMTLBE_F_PG_NO_READ
989 | IEMTLBE_F_PG_NO_WRITE
990 | IEMTLBE_F_PG_UNASSIGNED
991 | IEMTLBE_F_PG_CODE_PAGE);
992 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
993 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
994 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
995 }
996
997# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
998 /*
999 * Try to do a direct read using the pbMappingR3 pointer.
1000 */
1001 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1002 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1003 {
1004 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1005 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1006 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1007 {
1008 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1009 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1010 }
1011 else
1012 {
1013 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1014 if (cbInstr + (uint32_t)cbDst <= 15)
1015 {
1016 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1017 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1018 }
1019 else
1020 {
1021 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1022 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1023 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1024 }
1025 }
1026 if (cbDst <= cbMaxRead)
1027 {
1028 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1029 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1030
1031 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1032 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1033 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1034 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1035 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1036 return;
1037 }
1038 pVCpu->iem.s.pbInstrBuf = NULL;
1039
1040 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1041 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1042 }
1043# else
1044# error "refactor as needed"
1045 /*
1046 * If there is no special read handling, we can read a bit more and
1047 * put it in the prefetch buffer.
1048 */
1049 if ( cbDst < cbMaxRead
1050 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1051 {
1052 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1053 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1054 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1055 { /* likely */ }
1056 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1057 {
1058 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1059 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1060 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1061 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1062 }
1063 else
1064 {
1065 Log((RT_SUCCESS(rcStrict)
1066 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1067 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1068 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1069 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1070 }
1071 }
1072# endif
1073 /*
1074 * Special read handling, so only read exactly what's needed.
1075 * This is a highly unlikely scenario.
1076 */
1077 else
1078 {
1079 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1080
1081 /* Check instruction length. */
1082 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1083 if (RT_LIKELY(cbInstr + cbDst <= 15))
1084 { /* likely */ }
1085 else
1086 {
1087 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1088 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1089 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1090 }
1091
1092 /* Do the reading. */
1093 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1094 if (cbToRead > 0)
1095 {
1096 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1097 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1098 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1099 { /* likely */ }
1100 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1101 {
1102 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1103 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1104 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1105 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1106 }
1107 else
1108 {
1109 Log((RT_SUCCESS(rcStrict)
1110 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1111 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1112 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1113 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1114 }
1115 }
1116
1117 /* Update the state and probably return. */
1118 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1119 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1120 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1121
1122 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1123 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1124 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1125 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1126 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1127 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1128 pVCpu->iem.s.pbInstrBuf = NULL;
1129 if (cbToRead == cbDst)
1130 return;
1131 }
1132
1133 /*
1134 * More to read, loop.
1135 */
1136 cbDst -= cbMaxRead;
1137 pvDst = (uint8_t *)pvDst + cbMaxRead;
1138 }
1139# else /* !IN_RING3 */
1140 RT_NOREF(pvDst, cbDst);
1141 if (pvDst || cbDst)
1142 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1143# endif /* !IN_RING3 */
1144}
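/*
 * Illustrative example (not part of the original source) of the cross-page
 * case above, using made-up numbers: an instruction that starts 3 bytes
 * before the end of the buffered page runs past cbInstrBuf as soon as the
 * decoder asks for a 4-byte immediate.  The partial-buffer branch at the top
 * then copies the bytes that are left, the loop translates the next page,
 * checks the combined length against the architectural 15-byte instruction
 * limit (raising #GP(0) if it is exceeded), sets up pbInstrBuf for the new
 * page and copies the remaining bytes before returning.
 */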
1145
1146#else /* !IEM_WITH_CODE_TLB */
1147
1148/**
1149 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1150 * exception if it fails.
1151 *
1152 * @returns Strict VBox status code.
1153 * @param pVCpu The cross context virtual CPU structure of the
1154 * calling thread.
1155 * @param cbMin The minimum number of bytes relative to offOpcode
1156 * that must be read.
1157 */
1158VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1159{
1160 /*
1161 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1162 *
1163 * First translate CS:rIP to a physical address.
1164 */
1165 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1166 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1167 uint8_t const cbLeft = cbOpcode - offOpcode;
1168 Assert(cbLeft < cbMin);
1169 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1170
1171 uint32_t cbToTryRead;
1172 RTGCPTR GCPtrNext;
1173 if (IEM_IS_64BIT_CODE(pVCpu))
1174 {
1175 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1176 if (!IEM_IS_CANONICAL(GCPtrNext))
1177 return iemRaiseGeneralProtectionFault0(pVCpu);
1178 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1179 }
1180 else
1181 {
1182 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1183 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1184 GCPtrNext32 += cbOpcode;
1185 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1186 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1187 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1188 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1189 if (!cbToTryRead) /* overflowed */
1190 {
1191 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1192 cbToTryRead = UINT32_MAX;
1193 /** @todo check out wrapping around the code segment. */
1194 }
1195 if (cbToTryRead < cbMin - cbLeft)
1196 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1197 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1198
1199 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1200 if (cbToTryRead > cbLeftOnPage)
1201 cbToTryRead = cbLeftOnPage;
1202 }
1203
1204 /* Restrict to opcode buffer space.
1205
1206 We're making ASSUMPTIONS here based on work done previously in
1207 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1208 be fetched in case of an instruction crossing two pages. */
1209 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1210 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1211 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1212 { /* likely */ }
1213 else
1214 {
1215 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1216 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1217 return iemRaiseGeneralProtectionFault0(pVCpu);
1218 }
1219
1220 PGMPTWALK Walk;
1221 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1222 if (RT_FAILURE(rc))
1223 {
1224 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1225#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1226 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1227 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1228#endif
1229 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1230 }
1231 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1232 {
1233 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1234#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1235 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1236 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1237#endif
1238 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1239 }
1240 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1241 {
1242 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1243#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1244 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1245 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1246#endif
1247 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1248 }
1249 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1250 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1251 /** @todo Check reserved bits and such stuff. PGM is better at doing
1252 * that, so do it when implementing the guest virtual address
1253 * TLB... */
1254
1255 /*
1256 * Read the bytes at this address.
1257 *
1258 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1259 * and since PATM should only patch the start of an instruction there
1260 * should be no need to check again here.
1261 */
1262 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1263 {
1264 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1265 cbToTryRead, PGMACCESSORIGIN_IEM);
1266 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1267 { /* likely */ }
1268 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1269 {
1270 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1271 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1272 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1273 }
1274 else
1275 {
1276 Log((RT_SUCCESS(rcStrict)
1277 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1278 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1279 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1280 return rcStrict;
1281 }
1282 }
1283 else
1284 {
1285 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1286 if (RT_SUCCESS(rc))
1287 { /* likely */ }
1288 else
1289 {
1290 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1291 return rc;
1292 }
1293 }
1294 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1295 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1296
1297 return VINF_SUCCESS;
1298}
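/*
 * Illustrative example (not part of the original source), using made-up
 * numbers: if the initial prefetch stopped at a page boundary with
 * cbOpcode = 5 and the decoder sits at offOpcode = 3 needing a 4-byte
 * immediate, it calls in here with cbMin = 4 while cbLeft = 2.  GCPtrNext is
 * then rip/eip + 5 (the start of the next page) and at least 2 more bytes are
 * appended at abOpcode[5], after which decoding resumes from the combined
 * buffer.
 */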
1299
1300#endif /* !IEM_WITH_CODE_TLB */
1301#ifndef IEM_WITH_SETJMP
1302
1303/**
1304 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1305 *
1306 * @returns Strict VBox status code.
1307 * @param pVCpu The cross context virtual CPU structure of the
1308 * calling thread.
1309 * @param pb Where to return the opcode byte.
1310 */
1311VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1312{
1313 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1314 if (rcStrict == VINF_SUCCESS)
1315 {
1316 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1317 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1318 pVCpu->iem.s.offOpcode = offOpcode + 1;
1319 }
1320 else
1321 *pb = 0;
1322 return rcStrict;
1323}
1324
1325#else /* IEM_WITH_SETJMP */
1326
1327/**
1328 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1329 *
1330 * @returns The opcode byte.
1331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1332 */
1333uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1334{
1335# ifdef IEM_WITH_CODE_TLB
1336 uint8_t u8;
1337 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1338 return u8;
1339# else
1340 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1341 if (rcStrict == VINF_SUCCESS)
1342 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1343 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1344# endif
1345}
1346
1347#endif /* IEM_WITH_SETJMP */
1348
1349#ifndef IEM_WITH_SETJMP
1350
1351/**
1352 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1353 *
1354 * @returns Strict VBox status code.
1355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1356 * @param pu16 Where to return the opcode word (sign-extended byte).
1357 */
1358VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1359{
1360 uint8_t u8;
1361 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1362 if (rcStrict == VINF_SUCCESS)
1363 *pu16 = (int8_t)u8;
1364 return rcStrict;
1365}
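/*
 * Illustrative note (not part of the original source): the (int8_t) cast above
 * is what performs the sign extension; assigning the resulting value to the
 * wider unsigned type is well defined.  An opcode byte of 0xf0 (-16), for
 * example, becomes 0xfff0 here, 0xfffffff0 in the SxU32 variant below and
 * 0xfffffffffffffff0 in the SxU64 variant.
 */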
1366
1367
1368/**
1369 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1370 *
1371 * @returns Strict VBox status code.
1372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1373 * @param pu32 Where to return the opcode dword.
1374 */
1375VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1376{
1377 uint8_t u8;
1378 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1379 if (rcStrict == VINF_SUCCESS)
1380 *pu32 = (int8_t)u8;
1381 return rcStrict;
1382}
1383
1384
1385/**
1386 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1387 *
1388 * @returns Strict VBox status code.
1389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1390 * @param pu64 Where to return the opcode qword.
1391 */
1392VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1393{
1394 uint8_t u8;
1395 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1396 if (rcStrict == VINF_SUCCESS)
1397 *pu64 = (int8_t)u8;
1398 return rcStrict;
1399}
1400
1401#endif /* !IEM_WITH_SETJMP */
1402
1403
1404#ifndef IEM_WITH_SETJMP
1405
1406/**
1407 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1408 *
1409 * @returns Strict VBox status code.
1410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1411 * @param pu16 Where to return the opcode word.
1412 */
1413VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1414{
1415 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1416 if (rcStrict == VINF_SUCCESS)
1417 {
1418 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1419# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1420 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1421# else
1422 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1423# endif
1424 pVCpu->iem.s.offOpcode = offOpcode + 2;
1425 }
1426 else
1427 *pu16 = 0;
1428 return rcStrict;
1429}
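/*
 * Illustrative note (not part of the original source): both branches above
 * yield the same little-endian result - RT_MAKE_U16(0x34, 0x12) and an
 * unaligned uint16_t load of the bytes 0x34 0x12 both give 0x1234.  The
 * unaligned-access variant is presumably only enabled on hosts where such
 * loads are cheap and permitted.
 */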
1430
1431#else /* IEM_WITH_SETJMP */
1432
1433/**
1434 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1435 *
1436 * @returns The opcode word.
1437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1438 */
1439uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1440{
1441# ifdef IEM_WITH_CODE_TLB
1442 uint16_t u16;
1443 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1444 return u16;
1445# else
1446 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1447 if (rcStrict == VINF_SUCCESS)
1448 {
1449 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1450 pVCpu->iem.s.offOpcode += 2;
1451# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1452 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1453# else
1454 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1455# endif
1456 }
1457 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1458# endif
1459}
1460
1461#endif /* IEM_WITH_SETJMP */
1462
1463#ifndef IEM_WITH_SETJMP
1464
1465/**
1466 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1467 *
1468 * @returns Strict VBox status code.
1469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1470 * @param pu32 Where to return the opcode double word.
1471 */
1472VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1473{
1474 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1475 if (rcStrict == VINF_SUCCESS)
1476 {
1477 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1478 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1479 pVCpu->iem.s.offOpcode = offOpcode + 2;
1480 }
1481 else
1482 *pu32 = 0;
1483 return rcStrict;
1484}
1485
1486
1487/**
1488 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1489 *
1490 * @returns Strict VBox status code.
1491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1492 * @param pu64 Where to return the opcode quad word.
1493 */
1494VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1495{
1496 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1497 if (rcStrict == VINF_SUCCESS)
1498 {
1499 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1500 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1501 pVCpu->iem.s.offOpcode = offOpcode + 2;
1502 }
1503 else
1504 *pu64 = 0;
1505 return rcStrict;
1506}
1507
1508#endif /* !IEM_WITH_SETJMP */
1509
1510#ifndef IEM_WITH_SETJMP
1511
1512/**
1513 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1514 *
1515 * @returns Strict VBox status code.
1516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1517 * @param pu32 Where to return the opcode dword.
1518 */
1519VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1520{
1521 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1522 if (rcStrict == VINF_SUCCESS)
1523 {
1524 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1525# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1526 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1527# else
1528 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1529 pVCpu->iem.s.abOpcode[offOpcode + 1],
1530 pVCpu->iem.s.abOpcode[offOpcode + 2],
1531 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1532# endif
1533 pVCpu->iem.s.offOpcode = offOpcode + 4;
1534 }
1535 else
1536 *pu32 = 0;
1537 return rcStrict;
1538}
1539
1540#else /* IEM_WITH_SETJMP */
1541
1542/**
1543 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1544 *
1545 * @returns The opcode dword.
1546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1547 */
1548uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1549{
1550# ifdef IEM_WITH_CODE_TLB
1551 uint32_t u32;
1552 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1553 return u32;
1554# else
1555 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1556 if (rcStrict == VINF_SUCCESS)
1557 {
1558 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1559 pVCpu->iem.s.offOpcode = offOpcode + 4;
1560# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1561 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1562# else
1563 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1564 pVCpu->iem.s.abOpcode[offOpcode + 1],
1565 pVCpu->iem.s.abOpcode[offOpcode + 2],
1566 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1567# endif
1568 }
1569 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1570# endif
1571}
1572
1573#endif /* IEM_WITH_SETJMP */
1574
1575#ifndef IEM_WITH_SETJMP
1576
1577/**
1578 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1579 *
1580 * @returns Strict VBox status code.
1581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1582 * @param pu64 Where to return the opcode dword.
1583 */
1584VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1585{
1586 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1587 if (rcStrict == VINF_SUCCESS)
1588 {
1589 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1590 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1591 pVCpu->iem.s.abOpcode[offOpcode + 1],
1592 pVCpu->iem.s.abOpcode[offOpcode + 2],
1593 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1594 pVCpu->iem.s.offOpcode = offOpcode + 4;
1595 }
1596 else
1597 *pu64 = 0;
1598 return rcStrict;
1599}
1600
1601
1602/**
1603 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1604 *
1605 * @returns Strict VBox status code.
1606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1607 * @param pu64 Where to return the opcode qword.
1608 */
1609VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1610{
1611 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1612 if (rcStrict == VINF_SUCCESS)
1613 {
1614 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1615 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1616 pVCpu->iem.s.abOpcode[offOpcode + 1],
1617 pVCpu->iem.s.abOpcode[offOpcode + 2],
1618 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1619 pVCpu->iem.s.offOpcode = offOpcode + 4;
1620 }
1621 else
1622 *pu64 = 0;
1623 return rcStrict;
1624}
1625
1626#endif /* !IEM_WITH_SETJMP */
1627
1628#ifndef IEM_WITH_SETJMP
1629
1630/**
1631 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1632 *
1633 * @returns Strict VBox status code.
1634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1635 * @param pu64 Where to return the opcode qword.
1636 */
1637VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1638{
1639 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1640 if (rcStrict == VINF_SUCCESS)
1641 {
1642 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1643# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1644 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1645# else
1646 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1647 pVCpu->iem.s.abOpcode[offOpcode + 1],
1648 pVCpu->iem.s.abOpcode[offOpcode + 2],
1649 pVCpu->iem.s.abOpcode[offOpcode + 3],
1650 pVCpu->iem.s.abOpcode[offOpcode + 4],
1651 pVCpu->iem.s.abOpcode[offOpcode + 5],
1652 pVCpu->iem.s.abOpcode[offOpcode + 6],
1653 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1654# endif
1655 pVCpu->iem.s.offOpcode = offOpcode + 8;
1656 }
1657 else
1658 *pu64 = 0;
1659 return rcStrict;
1660}
1661
1662#else /* IEM_WITH_SETJMP */
1663
1664/**
1665 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1666 *
1667 * @returns The opcode qword.
1668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1669 */
1670uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1671{
1672# ifdef IEM_WITH_CODE_TLB
1673 uint64_t u64;
1674 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1675 return u64;
1676# else
1677 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1678 if (rcStrict == VINF_SUCCESS)
1679 {
1680 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1681 pVCpu->iem.s.offOpcode = offOpcode + 8;
1682# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1683 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1684# else
1685 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1686 pVCpu->iem.s.abOpcode[offOpcode + 1],
1687 pVCpu->iem.s.abOpcode[offOpcode + 2],
1688 pVCpu->iem.s.abOpcode[offOpcode + 3],
1689 pVCpu->iem.s.abOpcode[offOpcode + 4],
1690 pVCpu->iem.s.abOpcode[offOpcode + 5],
1691 pVCpu->iem.s.abOpcode[offOpcode + 6],
1692 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1693# endif
1694 }
1695 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1696# endif
1697}
1698
1699#endif /* IEM_WITH_SETJMP */
1700
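/* Note on the byte assembly in the fetchers above: guest opcode bytes are
 * stored little-endian in abOpcode[], so the RT_MAKE_U16/U32/U64 composition
 * and the direct unaligned read (IEM_USE_UNALIGNED_DATA_ACCESS) yield the same
 * value. Rough illustration, assuming abOpcode[off..off+3] = 78 56 34 12:
 * RT_MAKE_U32_FROM_U8(0x78, 0x56, 0x34, 0x12) == UINT32_C(0x12345678),
 * which is what *(uint32_t const *)&abOpcode[off] reads on a little-endian host. */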
1701
1702
1703/** @name Misc Worker Functions.
1704 * @{
1705 */
1706
1707/**
1708 * Gets the exception class for the specified exception vector.
1709 *
1710 * @returns The class of the specified exception.
1711 * @param uVector The exception vector.
1712 */
1713static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1714{
1715 Assert(uVector <= X86_XCPT_LAST);
1716 switch (uVector)
1717 {
1718 case X86_XCPT_DE:
1719 case X86_XCPT_TS:
1720 case X86_XCPT_NP:
1721 case X86_XCPT_SS:
1722 case X86_XCPT_GP:
1723 case X86_XCPT_SX: /* AMD only */
1724 return IEMXCPTCLASS_CONTRIBUTORY;
1725
1726 case X86_XCPT_PF:
1727 case X86_XCPT_VE: /* Intel only */
1728 return IEMXCPTCLASS_PAGE_FAULT;
1729
1730 case X86_XCPT_DF:
1731 return IEMXCPTCLASS_DOUBLE_FAULT;
1732 }
1733 return IEMXCPTCLASS_BENIGN;
1734}
1735
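/* For reference, iemGetXcptClass() feeds the classic double-fault decision
 * matrix (Intel SDM vol. 3, "Conditions for Generating a Double Fault"), roughly:
 * benign + anything -> deliver the second event as-is
 * contributory + contributory -> #DF
 * page fault + contributory or page fault -> #DF
 * #DF + contributory or page fault -> triple fault (shutdown)
 * IEMEvaluateRecursiveXcpt() below implements this, plus the NMI and the
 * recursive #AC special cases. */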
1736
1737/**
1738 * Evaluates how to handle an exception caused during delivery of another event
1739 * (exception / interrupt).
1740 *
1741 * @returns How to handle the recursive exception.
1742 * @param pVCpu The cross context virtual CPU structure of the
1743 * calling thread.
1744 * @param fPrevFlags The flags of the previous event.
1745 * @param uPrevVector The vector of the previous event.
1746 * @param fCurFlags The flags of the current exception.
1747 * @param uCurVector The vector of the current exception.
1748 * @param pfXcptRaiseInfo Where to store additional information about the
1749 * exception condition. Optional.
1750 */
1751VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1752 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1753{
1754 /*
1755 * Only CPU exceptions can be raised while delivering other events; exceptions generated by
1756 * software interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
1757 */
1758 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1759 Assert(pVCpu); RT_NOREF(pVCpu);
1760 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1761
1762 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1763 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1764 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1765 {
1766 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1767 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1768 {
1769 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1770 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1771 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1772 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1773 {
1774 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1775 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1776 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1777 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1778 uCurVector, pVCpu->cpum.GstCtx.cr2));
1779 }
1780 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1781 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1782 {
1783 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1784 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1785 }
1786 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1787 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1788 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1789 {
1790 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1791 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1792 }
1793 }
1794 else
1795 {
1796 if (uPrevVector == X86_XCPT_NMI)
1797 {
1798 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1799 if (uCurVector == X86_XCPT_PF)
1800 {
1801 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1802 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1803 }
1804 }
1805 else if ( uPrevVector == X86_XCPT_AC
1806 && uCurVector == X86_XCPT_AC)
1807 {
1808 enmRaise = IEMXCPTRAISE_CPU_HANG;
1809 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1810 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1811 }
1812 }
1813 }
1814 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1815 {
1816 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1817 if (uCurVector == X86_XCPT_PF)
1818 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1819 }
1820 else
1821 {
1822 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1823 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1824 }
1825
1826 if (pfXcptRaiseInfo)
1827 *pfXcptRaiseInfo = fRaiseInfo;
1828 return enmRaise;
1829}
1830
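/* Rough usage sketch (hypothetical caller, not verbatim from this file): if a
 * #GP is raised while a #PF was being delivered, the dispatcher would do
 * something along the lines of:
 * IEMXCPTRAISEINFO fRaiseInfo;
 * IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 * IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 * IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 * &fRaiseInfo);
 * and expect IEMXCPTRAISE_DOUBLE_FAULT with IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT,
 * since the previous event was a page fault and #GP is contributory. */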
1831
1832/**
1833 * Enters the CPU shutdown state initiated by a triple fault or other
1834 * unrecoverable conditions.
1835 *
1836 * @returns Strict VBox status code.
1837 * @param pVCpu The cross context virtual CPU structure of the
1838 * calling thread.
1839 */
1840static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1841{
1842 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1843 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1844
1845 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1846 {
1847 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1848 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1849 }
1850
1851 RT_NOREF(pVCpu);
1852 return VINF_EM_TRIPLE_FAULT;
1853}
1854
1855
1856/**
1857 * Validates a new SS segment.
1858 *
1859 * @returns VBox strict status code.
1860 * @param pVCpu The cross context virtual CPU structure of the
1861 * calling thread.
1862 * @param NewSS The new SS selector.
1863 * @param uCpl The CPL to load the stack for.
1864 * @param pDesc Where to return the descriptor.
1865 */
1866static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1867{
1868 /* Null selectors are not allowed (we're not called for dispatching
1869 interrupts with SS=0 in long mode). */
1870 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1871 {
1872 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1873 return iemRaiseTaskSwitchFault0(pVCpu);
1874 }
1875
1876 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1877 if ((NewSS & X86_SEL_RPL) != uCpl)
1878 {
1879 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1880 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1881 }
1882
1883 /*
1884 * Read the descriptor.
1885 */
1886 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1887 if (rcStrict != VINF_SUCCESS)
1888 return rcStrict;
1889
1890 /*
1891 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1892 */
1893 if (!pDesc->Legacy.Gen.u1DescType)
1894 {
1895 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1896 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1897 }
1898
1899 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1900 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1901 {
1902 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1903 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1904 }
1905 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1906 {
1907 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1908 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1909 }
1910
1911 /* Is it there? */
1912 /** @todo testcase: Is this checked before the canonical / limit check below? */
1913 if (!pDesc->Legacy.Gen.u1Present)
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1916 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1917 }
1918
1919 return VINF_SUCCESS;
1920}
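/* In short, the checks above mirror what the SDM documents for stack segment
 * loads (LSS/POP SS/MOV SS): the selector must be non-null (else #TS(0)), its
 * RPL must equal the target CPL, and the descriptor must be a present,
 * writable data segment with DPL equal to the target CPL (else #TS or #NP
 * with the selector as error code). */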
1921
1922/** @} */
1923
1924
1925/** @name Raising Exceptions.
1926 *
1927 * @{
1928 */
1929
1930
1931/**
1932 * Loads the specified stack far pointer from the TSS.
1933 *
1934 * @returns VBox strict status code.
1935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1936 * @param uCpl The CPL to load the stack for.
1937 * @param pSelSS Where to return the new stack segment.
1938 * @param puEsp Where to return the new stack pointer.
1939 */
1940static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1941{
1942 VBOXSTRICTRC rcStrict;
1943 Assert(uCpl < 4);
1944
1945 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1946 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1947 {
1948 /*
1949 * 16-bit TSS (X86TSS16).
1950 */
1951 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1952 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1953 {
1954 uint32_t off = uCpl * 4 + 2;
1955 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1956 {
1957 /** @todo check actual access pattern here. */
1958 uint32_t u32Tmp = 0; /* gcc maybe... */
1959 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1960 if (rcStrict == VINF_SUCCESS)
1961 {
1962 *puEsp = RT_LOWORD(u32Tmp);
1963 *pSelSS = RT_HIWORD(u32Tmp);
1964 return VINF_SUCCESS;
1965 }
1966 }
1967 else
1968 {
1969 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1970 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1971 }
1972 break;
1973 }
1974
1975 /*
1976 * 32-bit TSS (X86TSS32).
1977 */
1978 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1979 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1980 {
1981 uint32_t off = uCpl * 8 + 4;
1982 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1983 {
1984/** @todo check actual access pattern here. */
1985 uint64_t u64Tmp;
1986 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1987 if (rcStrict == VINF_SUCCESS)
1988 {
1989 *puEsp = u64Tmp & UINT32_MAX;
1990 *pSelSS = (RTSEL)(u64Tmp >> 32);
1991 return VINF_SUCCESS;
1992 }
1993 }
1994 else
1995 {
1996 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1997 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1998 }
1999 break;
2000 }
2001
2002 default:
2003 AssertFailed();
2004 rcStrict = VERR_IEM_IPE_4;
2005 break;
2006 }
2007
2008 *puEsp = 0; /* make gcc happy */
2009 *pSelSS = 0; /* make gcc happy */
2010 return rcStrict;
2011}
2012
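/* Worked example for the offsets used above, assuming the standard X86TSS16
 * and X86TSS32 layouts:
 * - 16-bit TSS: the {sp0,ss0} pair starts at offset 2 and each ring pair is
 * 4 bytes, so uCpl=1 -> off = 1*4 + 2 = 6, i.e. sp1 (word) followed by ss1.
 * - 32-bit TSS: the {esp0,ss0} pair starts at offset 4 and each ring pair is
 * 8 bytes, so uCpl=2 -> off = 2*8 + 4 = 20 (0x14), i.e. esp2 followed by ss2. */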
2013
2014/**
2015 * Loads the specified stack pointer from the 64-bit TSS.
2016 *
2017 * @returns VBox strict status code.
2018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2019 * @param uCpl The CPL to load the stack for.
2020 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2021 * @param puRsp Where to return the new stack pointer.
2022 */
2023static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2024{
2025 Assert(uCpl < 4);
2026 Assert(uIst < 8);
2027 *puRsp = 0; /* make gcc happy */
2028
2029 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2030 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2031
2032 uint32_t off;
2033 if (uIst)
2034 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2035 else
2036 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2037 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2038 {
2039 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2040 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2041 }
2042
2043 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2044}
2045
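/* Similarly for the 64-bit TSS: rsp0 sits at offset 4 and ist1 at offset 0x24,
 * each entry being 8 bytes. So uIst=0, uCpl=1 -> off = 4 + 1*8 = 12 (rsp1),
 * while uIst=3 -> off = 0x24 + 2*8 = 0x34 (ist3). */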
2046
2047/**
2048 * Adjust the CPU state according to the exception being raised.
2049 *
2050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2051 * @param u8Vector The exception that has been raised.
2052 */
2053DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2054{
2055 switch (u8Vector)
2056 {
2057 case X86_XCPT_DB:
2058 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2059 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2060 break;
2061 /** @todo Read the AMD and Intel exception reference... */
2062 }
2063}
2064
2065
2066/**
2067 * Implements exceptions and interrupts for real mode.
2068 *
2069 * @returns VBox strict status code.
2070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2071 * @param cbInstr The number of bytes to offset rIP by in the return
2072 * address.
2073 * @param u8Vector The interrupt / exception vector number.
2074 * @param fFlags The flags.
2075 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2076 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2077 */
2078static VBOXSTRICTRC
2079iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2080 uint8_t cbInstr,
2081 uint8_t u8Vector,
2082 uint32_t fFlags,
2083 uint16_t uErr,
2084 uint64_t uCr2) RT_NOEXCEPT
2085{
2086 NOREF(uErr); NOREF(uCr2);
2087 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2088
2089 /*
2090 * Read the IDT entry.
2091 */
2092 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2093 {
2094 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2095 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2096 }
2097 RTFAR16 Idte;
2098 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2099 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2100 {
2101 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2102 return rcStrict;
2103 }
2104
2105 /*
2106 * Push the stack frame.
2107 */
2108 uint8_t bUnmapInfo;
2109 uint16_t *pu16Frame;
2110 uint64_t uNewRsp;
2111 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2112 if (rcStrict != VINF_SUCCESS)
2113 return rcStrict;
2114
2115 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2116#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2117 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2118 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2119 fEfl |= UINT16_C(0xf000);
2120#endif
2121 pu16Frame[2] = (uint16_t)fEfl;
2122 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2123 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2124 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2125 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2126 return rcStrict;
2127
2128 /*
2129 * Load the vector address into cs:ip and make exception specific state
2130 * adjustments.
2131 */
2132 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2133 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2134 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2135 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2136 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2137 pVCpu->cpum.GstCtx.rip = Idte.off;
2138 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2139 IEMMISC_SET_EFL(pVCpu, fEfl);
2140
2141 /** @todo do we actually do this in real mode? */
2142 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2143 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2144
2145 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK doesn't really change here,
2146 so best leave them alone in case we're in a weird kind of real mode... */
2147
2148 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2149}
2150
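/* Real-mode dispatch in a nutshell (matching the code above): each IVT entry
 * is 4 bytes (IP:CS) at linear address idtr.pIdt + 4 * vector, and a 6-byte
 * frame of FLAGS, CS, IP is pushed (IP ending up at the lowest address). For
 * example, INT 21h with the usual IDTR base of 0 reads its vector from linear
 * address 0x84 and enters the handler with IF, TF and AC cleared as above. */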
2151
2152/**
2153 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2154 *
2155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2156 * @param pSReg Pointer to the segment register.
2157 */
2158DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2159{
2160 pSReg->Sel = 0;
2161 pSReg->ValidSel = 0;
2162 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2163 {
2164 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2165 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2166 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2167 }
2168 else
2169 {
2170 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2171 /** @todo check this on AMD-V */
2172 pSReg->u64Base = 0;
2173 pSReg->u32Limit = 0;
2174 }
2175}
2176
2177
2178/**
2179 * Loads a segment selector during a task switch in V8086 mode.
2180 *
2181 * @param pSReg Pointer to the segment register.
2182 * @param uSel The selector value to load.
2183 */
2184DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2185{
2186 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2187 pSReg->Sel = uSel;
2188 pSReg->ValidSel = uSel;
2189 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2190 pSReg->u64Base = uSel << 4;
2191 pSReg->u32Limit = 0xffff;
2192 pSReg->Attr.u = 0xf3;
2193}
2194
2195
2196/**
2197 * Loads a segment selector during a task switch in protected mode.
2198 *
2199 * In this task switch scenario, we would throw \#TS exceptions rather than
2200 * \#GPs.
2201 *
2202 * @returns VBox strict status code.
2203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2204 * @param pSReg Pointer to the segment register.
2205 * @param uSel The new selector value.
2206 *
2207 * @remarks This does _not_ handle CS or SS.
2208 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2209 */
2210static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2211{
2212 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2213
2214 /* Null data selector. */
2215 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2216 {
2217 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2219 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2220 return VINF_SUCCESS;
2221 }
2222
2223 /* Fetch the descriptor. */
2224 IEMSELDESC Desc;
2225 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2226 if (rcStrict != VINF_SUCCESS)
2227 {
2228 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2229 VBOXSTRICTRC_VAL(rcStrict)));
2230 return rcStrict;
2231 }
2232
2233 /* Must be a data segment or readable code segment. */
2234 if ( !Desc.Legacy.Gen.u1DescType
2235 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2236 {
2237 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2238 Desc.Legacy.Gen.u4Type));
2239 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2240 }
2241
2242 /* Check privileges for data segments and non-conforming code segments. */
2243 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2244 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2245 {
2246 /* The RPL and the new CPL must be less than or equal to the DPL. */
2247 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2248 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2249 {
2250 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2251 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2252 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2253 }
2254 }
2255
2256 /* Is it there? */
2257 if (!Desc.Legacy.Gen.u1Present)
2258 {
2259 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2260 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2261 }
2262
2263 /* The base and limit. */
2264 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2265 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2266
2267 /*
2268 * Ok, everything checked out fine. Now set the accessed bit before
2269 * committing the result into the registers.
2270 */
2271 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2272 {
2273 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2274 if (rcStrict != VINF_SUCCESS)
2275 return rcStrict;
2276 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2277 }
2278
2279 /* Commit */
2280 pSReg->Sel = uSel;
2281 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2282 pSReg->u32Limit = cbLimit;
2283 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2284 pSReg->ValidSel = uSel;
2285 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2286 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2287 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2288
2289 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2290 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2291 return VINF_SUCCESS;
2292}
2293
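/* Rough roadmap of iemTaskSwitch() below (a summary of the code, not an
 * additional specification):
 * 1. Intercept checks: VMX task-switch VM-exit, SVM task-switch #VMEXIT.
 * 2. Limit checks on the new and the current TSS.
 * 3. For JMP/IRET: clear the busy bit in the outgoing TSS descriptor, and
 * EFLAGS.NT in the saved flags for IRET.
 * 4. Save the dynamic fields (GPRs, EIP, EFLAGS, selectors) into the current
 * TSS; for CALL/INT_XCPT store the back-link in the new TSS.
 * 5. Load the new state: TR + busy bit, CR0.TS, CR3 (386 TSS with paging),
 * LDTR, then SS/CS and the data selectors with the usual descriptor checks.
 * 6. Push any error code on the new stack and check EIP against CS.limit. */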
2294
2295/**
2296 * Performs a task switch.
2297 *
2298 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2299 * caller is responsible for performing the necessary checks (like DPL, TSS
2300 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2301 * reference for JMP, CALL, IRET.
2302 *
2303 * If the task switch is due to a software interrupt or hardware exception,
2304 * the caller is responsible for validating the TSS selector and descriptor. See
2305 * Intel Instruction reference for INT n.
2306 *
2307 * @returns VBox strict status code.
2308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2309 * @param enmTaskSwitch The cause of the task switch.
2310 * @param uNextEip The EIP effective after the task switch.
2311 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2312 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2313 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2314 * @param SelTss The TSS selector of the new task.
2315 * @param pNewDescTss Pointer to the new TSS descriptor.
2316 */
2317VBOXSTRICTRC
2318iemTaskSwitch(PVMCPUCC pVCpu,
2319 IEMTASKSWITCH enmTaskSwitch,
2320 uint32_t uNextEip,
2321 uint32_t fFlags,
2322 uint16_t uErr,
2323 uint64_t uCr2,
2324 RTSEL SelTss,
2325 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2326{
2327 Assert(!IEM_IS_REAL_MODE(pVCpu));
2328 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2329 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2330
2331 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2332 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2333 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2334 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2335 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2336
2337 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2338 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2339
2340 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2341 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2342
2343 /* Update CR2 in case it's a page-fault. */
2344 /** @todo This should probably be done much earlier in IEM/PGM. See
2345 * @bugref{5653#c49}. */
2346 if (fFlags & IEM_XCPT_FLAGS_CR2)
2347 pVCpu->cpum.GstCtx.cr2 = uCr2;
2348
2349 /*
2350 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2351 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2352 */
2353 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2354 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2355 if (uNewTssLimit < uNewTssLimitMin)
2356 {
2357 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2358 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2359 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2360 }
2361
2362 /*
2363 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2364 * The new TSS must have been read and validated (DPL, limits etc.) before a
2365 * task-switch VM-exit commences.
2366 *
2367 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2368 */
2369 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2370 {
2371 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2372 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2373 }
2374
2375 /*
2376 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2377 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2378 */
2379 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2380 {
2381 uint32_t const uExitInfo1 = SelTss;
2382 uint32_t uExitInfo2 = uErr;
2383 switch (enmTaskSwitch)
2384 {
2385 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2386 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2387 default: break;
2388 }
2389 if (fFlags & IEM_XCPT_FLAGS_ERR)
2390 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2391 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2392 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2393
2394 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2395 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2396 RT_NOREF2(uExitInfo1, uExitInfo2);
2397 }
2398
2399 /*
2400 * Check the current TSS limit. The last written byte to the current TSS during the
2401 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2402 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2403 *
2404 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2405 * end up with smaller than "legal" TSS limits.
2406 */
2407 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2408 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2409 if (uCurTssLimit < uCurTssLimitMin)
2410 {
2411 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2412 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2413 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2414 }
2415
2416 /*
2417 * Verify that the new TSS can be accessed and map it. Map only the required contents
2418 * and not the entire TSS.
2419 */
2420 uint8_t bUnmapInfoNewTss;
2421 void *pvNewTss;
2422 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2423 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2424 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2425 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2426 * not perform correct translation if this happens. See Intel spec. 7.2.1
2427 * "Task-State Segment". */
2428 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2429/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2430 * Consider wrapping the remainder into a function for simpler cleanup. */
2431 if (rcStrict != VINF_SUCCESS)
2432 {
2433 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2434 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2435 return rcStrict;
2436 }
2437
2438 /*
2439 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2440 */
2441 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2442 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2443 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2444 {
2445 uint8_t bUnmapInfoDescCurTss;
2446 PX86DESC pDescCurTss;
2447 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2448 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2449 if (rcStrict != VINF_SUCCESS)
2450 {
2451 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2452 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2453 return rcStrict;
2454 }
2455
2456 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2457 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2458 if (rcStrict != VINF_SUCCESS)
2459 {
2460 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2461 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2462 return rcStrict;
2463 }
2464
2465 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2466 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2467 {
2468 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2469 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2470 fEFlags &= ~X86_EFL_NT;
2471 }
2472 }
2473
2474 /*
2475 * Save the CPU state into the current TSS.
2476 */
2477 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2478 if (GCPtrNewTss == GCPtrCurTss)
2479 {
2480 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2481 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2482 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2483 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2484 pVCpu->cpum.GstCtx.ldtr.Sel));
2485 }
2486 if (fIsNewTss386)
2487 {
2488 /*
2489 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2490 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2491 */
2492 uint8_t bUnmapInfoCurTss32;
2493 void *pvCurTss32;
2494 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2495 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2496 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2497 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2498 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2499 if (rcStrict != VINF_SUCCESS)
2500 {
2501 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2502 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2503 return rcStrict;
2504 }
2505
2506 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */
2507 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2508 pCurTss32->eip = uNextEip;
2509 pCurTss32->eflags = fEFlags;
2510 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2511 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2512 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2513 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2514 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2515 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2516 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2517 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2518 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2519 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2520 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2521 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2522 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2523 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2524
2525 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2526 if (rcStrict != VINF_SUCCESS)
2527 {
2528 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2529 VBOXSTRICTRC_VAL(rcStrict)));
2530 return rcStrict;
2531 }
2532 }
2533 else
2534 {
2535 /*
2536 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2537 */
2538 uint8_t bUnmapInfoCurTss16;
2539 void *pvCurTss16;
2540 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2541 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2542 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2543 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2544 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2545 if (rcStrict != VINF_SUCCESS)
2546 {
2547 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2548 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2549 return rcStrict;
2550 }
2551
2552 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */
2553 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2554 pCurTss16->ip = uNextEip;
2555 pCurTss16->flags = (uint16_t)fEFlags;
2556 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2557 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2558 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2559 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2560 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2561 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2562 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2563 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2564 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2565 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2566 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2567 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2568
2569 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2570 if (rcStrict != VINF_SUCCESS)
2571 {
2572 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2573 VBOXSTRICTRC_VAL(rcStrict)));
2574 return rcStrict;
2575 }
2576 }
2577
2578 /*
2579 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2580 */
2581 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2582 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2583 {
2584 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2585 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2586 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2587 }
2588
2589 /*
2590 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2591 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2592 */
2593 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2594 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2595 bool fNewDebugTrap;
2596 if (fIsNewTss386)
2597 {
2598 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2599 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2600 uNewEip = pNewTss32->eip;
2601 uNewEflags = pNewTss32->eflags;
2602 uNewEax = pNewTss32->eax;
2603 uNewEcx = pNewTss32->ecx;
2604 uNewEdx = pNewTss32->edx;
2605 uNewEbx = pNewTss32->ebx;
2606 uNewEsp = pNewTss32->esp;
2607 uNewEbp = pNewTss32->ebp;
2608 uNewEsi = pNewTss32->esi;
2609 uNewEdi = pNewTss32->edi;
2610 uNewES = pNewTss32->es;
2611 uNewCS = pNewTss32->cs;
2612 uNewSS = pNewTss32->ss;
2613 uNewDS = pNewTss32->ds;
2614 uNewFS = pNewTss32->fs;
2615 uNewGS = pNewTss32->gs;
2616 uNewLdt = pNewTss32->selLdt;
2617 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2618 }
2619 else
2620 {
2621 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2622 uNewCr3 = 0;
2623 uNewEip = pNewTss16->ip;
2624 uNewEflags = pNewTss16->flags;
2625 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2626 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2627 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2628 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2629 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2630 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2631 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2632 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2633 uNewES = pNewTss16->es;
2634 uNewCS = pNewTss16->cs;
2635 uNewSS = pNewTss16->ss;
2636 uNewDS = pNewTss16->ds;
2637 uNewFS = 0;
2638 uNewGS = 0;
2639 uNewLdt = pNewTss16->selLdt;
2640 fNewDebugTrap = false;
2641 }
2642
2643 if (GCPtrNewTss == GCPtrCurTss)
2644 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2645 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2646
2647 /*
2648 * We're done accessing the new TSS.
2649 */
2650 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2651 if (rcStrict != VINF_SUCCESS)
2652 {
2653 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2654 return rcStrict;
2655 }
2656
2657 /*
2658 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2659 */
2660 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2661 {
2662 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2663 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2664 if (rcStrict != VINF_SUCCESS)
2665 {
2666 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2667 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2668 return rcStrict;
2669 }
2670
2671 /* Check that the descriptor indicates the new TSS is available (not busy). */
2672 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2673 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2674 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2675
2676 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2677 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2678 if (rcStrict != VINF_SUCCESS)
2679 {
2680 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2681 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2682 return rcStrict;
2683 }
2684 }
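/* For reference: the busy bit is bit 1 of the TSS descriptor type (1/9 =
 * available, 3/11 = busy). JMP and IRET cleared it on the outgoing task
 * further up; everything except IRET sets it on the incoming task just above,
 * IRET instead expecting the incoming TSS to already be busy. */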
2685
2686 /*
2687 * From this point on, we're technically in the new task. We will defer exceptions
2688 * until the completion of the task switch but before executing any instructions in the new task.
2689 */
2690 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2691 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2692 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2693 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2694 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2695 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2696 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2697
2698 /* Set the busy bit in TR. */
2699 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2700
2701 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2702 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2703 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2704 {
2705 uNewEflags |= X86_EFL_NT;
2706 }
2707
2708 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2709 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2710 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2711
2712 pVCpu->cpum.GstCtx.eip = uNewEip;
2713 pVCpu->cpum.GstCtx.eax = uNewEax;
2714 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2715 pVCpu->cpum.GstCtx.edx = uNewEdx;
2716 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2717 pVCpu->cpum.GstCtx.esp = uNewEsp;
2718 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2719 pVCpu->cpum.GstCtx.esi = uNewEsi;
2720 pVCpu->cpum.GstCtx.edi = uNewEdi;
2721
2722 uNewEflags &= X86_EFL_LIVE_MASK;
2723 uNewEflags |= X86_EFL_RA1_MASK;
2724 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2725
2726 /*
2727 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2728 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2729 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2730 */
2731 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2732 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2733
2734 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2735 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2736
2737 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2738 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2739
2740 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2741 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2742
2743 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2744 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2745
2746 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2747 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2748 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2749
2750 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2751 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2752 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2753 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2754
2755 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2756 {
2757 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2758 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2759 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2760 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2761 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2762 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2763 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2764 }
2765
2766 /*
2767 * Switch CR3 for the new task.
2768 */
2769 if ( fIsNewTss386
2770 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2771 {
2772 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2773 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2774 AssertRCSuccessReturn(rc, rc);
2775
2776 /* Inform PGM. */
2777 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2778 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2779 AssertRCReturn(rc, rc);
2780 /* ignore informational status codes */
2781
2782 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2783 }
2784
2785 /*
2786 * Switch LDTR for the new task.
2787 */
2788 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2789 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2790 else
2791 {
2792 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2793
2794 IEMSELDESC DescNewLdt;
2795 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2796 if (rcStrict != VINF_SUCCESS)
2797 {
2798 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2799 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2800 return rcStrict;
2801 }
2802 if ( !DescNewLdt.Legacy.Gen.u1Present
2803 || DescNewLdt.Legacy.Gen.u1DescType
2804 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2805 {
2806 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2807 uNewLdt, DescNewLdt.Legacy.u));
2808 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2809 }
2810
2811 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2812 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2813 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2814 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2815 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2816 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2817 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2818 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2819 }
2820
2821 IEMSELDESC DescSS;
2822 if (IEM_IS_V86_MODE(pVCpu))
2823 {
2824 IEM_SET_CPL(pVCpu, 3);
2825 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2826 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2827 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2828 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2829 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2830 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2831
2832 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2833 DescSS.Legacy.u = 0;
2834 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2835 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2836 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2837 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2838 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2839 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2840 DescSS.Legacy.Gen.u2Dpl = 3;
2841 }
2842 else
2843 {
2844 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2845
2846 /*
2847 * Load the stack segment for the new task.
2848 */
2849 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2850 {
2851 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2852 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2853 }
2854
2855 /* Fetch the descriptor. */
2856 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2857 if (rcStrict != VINF_SUCCESS)
2858 {
2859 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2860 VBOXSTRICTRC_VAL(rcStrict)));
2861 return rcStrict;
2862 }
2863
2864 /* SS must be a data segment and writable. */
2865 if ( !DescSS.Legacy.Gen.u1DescType
2866 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2867 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2868 {
2869 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2870 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2871 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2872 }
2873
2874 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2875 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2876 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2877 {
2878 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2879 uNewCpl));
2880 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2881 }
2882
2883 /* Is it there? */
2884 if (!DescSS.Legacy.Gen.u1Present)
2885 {
2886 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2887 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2888 }
2889
2890 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2891 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2892
2893 /* Set the accessed bit before committing the result into SS. */
2894 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2895 {
2896 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2897 if (rcStrict != VINF_SUCCESS)
2898 return rcStrict;
2899 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2900 }
2901
2902 /* Commit SS. */
2903 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2904 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2905 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2906 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2907 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2908 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2909 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2910
2911 /* CPL has changed, update IEM before loading rest of segments. */
2912 IEM_SET_CPL(pVCpu, uNewCpl);
2913
2914 /*
2915 * Load the data segments for the new task.
2916 */
2917 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2918 if (rcStrict != VINF_SUCCESS)
2919 return rcStrict;
2920 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2921 if (rcStrict != VINF_SUCCESS)
2922 return rcStrict;
2923 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2924 if (rcStrict != VINF_SUCCESS)
2925 return rcStrict;
2926 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2927 if (rcStrict != VINF_SUCCESS)
2928 return rcStrict;
2929
2930 /*
2931 * Load the code segment for the new task.
2932 */
2933 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2934 {
2935 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2936 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2937 }
2938
2939 /* Fetch the descriptor. */
2940 IEMSELDESC DescCS;
2941 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2942 if (rcStrict != VINF_SUCCESS)
2943 {
2944 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2945 return rcStrict;
2946 }
2947
2948 /* CS must be a code segment. */
2949 if ( !DescCS.Legacy.Gen.u1DescType
2950 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2951 {
2952 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2953 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2954 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2955 }
2956
2957 /* For conforming CS, DPL must be less than or equal to the RPL. */
2958 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2959 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2960 {
2961 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2962 DescCS.Legacy.Gen.u2Dpl));
2963 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2964 }
2965
2966 /* For non-conforming CS, DPL must match RPL. */
2967 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2968 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2969 {
2970 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2971 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2972 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2973 }
2974
2975 /* Is it there? */
2976 if (!DescCS.Legacy.Gen.u1Present)
2977 {
2978 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2979 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2980 }
2981
2982 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2983 u64Base = X86DESC_BASE(&DescCS.Legacy);
2984
2985 /* Set the accessed bit before committing the result into CS. */
2986 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2987 {
2988 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2989 if (rcStrict != VINF_SUCCESS)
2990 return rcStrict;
2991 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2992 }
2993
2994 /* Commit CS. */
2995 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2996 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2997 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2998 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2999 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3000 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3001 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3002 }
3003
3004 /* Make sure the CPU mode is correct. */
3005 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3006 if (fExecNew != pVCpu->iem.s.fExec)
3007 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3008 pVCpu->iem.s.fExec = fExecNew;
3009
3010 /** @todo Debug trap. */
3011 if (fIsNewTss386 && fNewDebugTrap)
3012 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3013
3014 /*
3015 * Construct the error code masks based on what caused this task switch.
3016 * See Intel Instruction reference for INT.
3017 */
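/* uExt supplies the EXT bit of any error code raised below: it is 1 when the
   task switch was caused by an exception, an external interrupt or ICEBP (INT1),
   and 0 for INT n software interrupts (and for JMP/CALL/IRET initiated switches). */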
3018 uint16_t uExt;
3019 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3020 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3021 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3022 uExt = 1;
3023 else
3024 uExt = 0;
3025
3026 /*
3027 * Push any error code on to the new stack.
3028 */
3029 if (fFlags & IEM_XCPT_FLAGS_ERR)
3030 {
3031 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3032 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3033 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
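/* A 32-bit TSS means the error code is pushed as a dword, a 16-bit TSS as a word. */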
3034
3035 /* Check that there is sufficient space on the stack. */
3036 /** @todo Factor out segment limit checking for normal/expand down segments
3037 * into a separate function. */
3038 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3039 {
3040 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3041 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3042 {
3043 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3044 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3045 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3046 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3047 }
3048 }
3049 else
3050 {
3051 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3052 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3053 {
3054 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3055 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3056 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3057 }
3058 }
3059
3060
3061 if (fIsNewTss386)
3062 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3063 else
3064 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3065 if (rcStrict != VINF_SUCCESS)
3066 {
3067 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3068 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3069 return rcStrict;
3070 }
3071 }
3072
3073 /* Check the new EIP against the new CS limit. */
3074 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3075 {
3076 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3077 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3078 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3079 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3080 }
3081
3082 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3083 pVCpu->cpum.GstCtx.ss.Sel));
3084 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3085}
3086
3087
3088/**
3089 * Implements exceptions and interrupts for protected mode.
3090 *
3091 * @returns VBox strict status code.
3092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3093 * @param cbInstr The number of bytes to offset rIP by in the return
3094 * address.
3095 * @param u8Vector The interrupt / exception vector number.
3096 * @param fFlags The flags.
3097 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3098 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3099 */
3100static VBOXSTRICTRC
3101iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3102 uint8_t cbInstr,
3103 uint8_t u8Vector,
3104 uint32_t fFlags,
3105 uint16_t uErr,
3106 uint64_t uCr2) RT_NOEXCEPT
3107{
3108 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3109
3110 /*
3111 * Read the IDT entry.
3112 */
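/* Protected-mode IDT entries (gate descriptors) are 8 bytes each, so the entry
   for vector N ends at offset 8*N + 7 and the IDT limit must cover that byte. */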
3113 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3114 {
3115 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3116 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3117 }
3118 X86DESC Idte;
3119 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3120 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3121 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3122 {
3123 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3124 return rcStrict;
3125 }
3126 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3127 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3128 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3129 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3130
3131 /*
3132 * Check the descriptor type, DPL and such.
3133 * ASSUMES this is done in the same order as described for call-gate calls.
3134 */
3135 if (Idte.Gate.u1DescType)
3136 {
3137 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3138 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3139 }
3140 bool fTaskGate = false;
3141 uint8_t f32BitGate = true;
3142 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
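/* These flags are always cleared from the guest EFLAGS once the old value has been
   pushed; interrupt gates additionally clear IF (added in the switch below). */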
3143 switch (Idte.Gate.u4Type)
3144 {
3145 case X86_SEL_TYPE_SYS_UNDEFINED:
3146 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3147 case X86_SEL_TYPE_SYS_LDT:
3148 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3149 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3150 case X86_SEL_TYPE_SYS_UNDEFINED2:
3151 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3152 case X86_SEL_TYPE_SYS_UNDEFINED3:
3153 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3154 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3155 case X86_SEL_TYPE_SYS_UNDEFINED4:
3156 {
3157 /** @todo check what actually happens when the type is wrong...
3158 * esp. call gates. */
3159 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3160 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3161 }
3162
3163 case X86_SEL_TYPE_SYS_286_INT_GATE:
3164 f32BitGate = false;
3165 RT_FALL_THRU();
3166 case X86_SEL_TYPE_SYS_386_INT_GATE:
3167 fEflToClear |= X86_EFL_IF;
3168 break;
3169
3170 case X86_SEL_TYPE_SYS_TASK_GATE:
3171 fTaskGate = true;
3172#ifndef IEM_IMPLEMENTS_TASKSWITCH
3173 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3174#endif
3175 break;
3176
3177 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3178 f32BitGate = false;
RT_FALL_THRU();
3179 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3180 break;
3181
3182 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3183 }
3184
3185 /* Check DPL against CPL if applicable. */
3186 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3187 {
3188 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3189 {
3190 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3191 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3192 }
3193 }
3194
3195 /* Is it there? */
3196 if (!Idte.Gate.u1Present)
3197 {
3198 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3199 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3200 }
3201
3202 /* Is it a task-gate? */
3203 if (fTaskGate)
3204 {
3205 /*
3206 * Construct the error code masks based on what caused this task switch.
3207 * See Intel Instruction reference for INT.
3208 */
3209 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3210 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3211 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3212 RTSEL SelTss = Idte.Gate.u16Sel;
3213
3214 /*
3215 * Fetch the TSS descriptor in the GDT.
3216 */
3217 IEMSELDESC DescTSS;
3218 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3219 if (rcStrict != VINF_SUCCESS)
3220 {
3221 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3222 VBOXSTRICTRC_VAL(rcStrict)));
3223 return rcStrict;
3224 }
3225
3226 /* The TSS descriptor must be a system segment and be available (not busy). */
3227 if ( DescTSS.Legacy.Gen.u1DescType
3228 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3229 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3230 {
3231 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3232 u8Vector, SelTss, DescTSS.Legacy.au64));
3233 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3234 }
3235
3236 /* The TSS must be present. */
3237 if (!DescTSS.Legacy.Gen.u1Present)
3238 {
3239 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3240 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3241 }
3242
3243 /* Do the actual task switch. */
3244 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3245 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3246 fFlags, uErr, uCr2, SelTss, &DescTSS);
3247 }
3248
3249 /* A null CS is bad. */
3250 RTSEL NewCS = Idte.Gate.u16Sel;
3251 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3252 {
3253 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3254 return iemRaiseGeneralProtectionFault0(pVCpu);
3255 }
3256
3257 /* Fetch the descriptor for the new CS. */
3258 IEMSELDESC DescCS;
3259 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3260 if (rcStrict != VINF_SUCCESS)
3261 {
3262 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3263 return rcStrict;
3264 }
3265
3266 /* Must be a code segment. */
3267 if (!DescCS.Legacy.Gen.u1DescType)
3268 {
3269 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3270 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3271 }
3272 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3273 {
3274 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3275 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3276 }
3277
3278 /* Don't allow lowering the privilege level. */
3279 /** @todo Does the lowering of privileges apply to software interrupts
3280 * only? This has bearings on the more-privileged or
3281 * same-privilege stack behavior further down. A testcase would
3282 * be nice. */
3283 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3284 {
3285 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3286 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3287 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3288 }
3289
3290 /* Make sure the selector is present. */
3291 if (!DescCS.Legacy.Gen.u1Present)
3292 {
3293 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3294 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3295 }
3296
3297#ifdef LOG_ENABLED
3298 /* If software interrupt, try to decode it if logging is enabled and such. */
3299 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3300 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3301 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3302#endif
3303
3304 /* Check the new EIP against the new CS limit. */
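/* A 286 gate only carries a 16-bit offset; a 386 gate combines the low and high offset words. */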
3305 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3306 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3307 ? Idte.Gate.u16OffsetLow
3308 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3309 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3310 if (uNewEip > cbLimitCS)
3311 {
3312 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3313 u8Vector, uNewEip, cbLimitCS, NewCS));
3314 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3315 }
3316 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3317
3318 /* Calc the flag image to push. */
3319 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3320 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3321 fEfl &= ~X86_EFL_RF;
3322 else
3323 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3324
3325 /* From V8086 mode only go to CPL 0. */
3326 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3327 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
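/* (Conforming CS: the current CPL is kept; non-conforming CS: the handler runs at CS.DPL.) */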
3328 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3329 {
3330 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3331 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3332 }
3333
3334 /*
3335 * If the privilege level changes, we need to get a new stack from the TSS.
3336 * This in turn means validating the new SS and ESP...
3337 */
3338 if (uNewCpl != IEM_GET_CPL(pVCpu))
3339 {
3340 RTSEL NewSS;
3341 uint32_t uNewEsp;
3342 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3343 if (rcStrict != VINF_SUCCESS)
3344 return rcStrict;
3345
3346 IEMSELDESC DescSS;
3347 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3348 if (rcStrict != VINF_SUCCESS)
3349 return rcStrict;
3350 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3351 if (!DescSS.Legacy.Gen.u1DefBig)
3352 {
3353 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3354 uNewEsp = (uint16_t)uNewEsp;
3355 }
3356
3357 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3358
3359 /* Check that there is sufficient space for the stack frame. */
3360 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3361 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3362 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3363 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3364
3365 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3366 {
3367 if ( uNewEsp - 1 > cbLimitSS
3368 || uNewEsp < cbStackFrame)
3369 {
3370 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3371 u8Vector, NewSS, uNewEsp, cbStackFrame));
3372 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3373 }
3374 }
3375 else
3376 {
3377 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3378 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3379 {
3380 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3381 u8Vector, NewSS, uNewEsp, cbStackFrame));
3382 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3383 }
3384 }
3385
3386 /*
3387 * Start making changes.
3388 */
3389
3390 /* Set the new CPL so that stack accesses use it. */
3391 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3392 IEM_SET_CPL(pVCpu, uNewCpl);
3393
3394 /* Create the stack frame. */
3395 uint8_t bUnmapInfoStackFrame;
3396 RTPTRUNION uStackFrame;
3397 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3398 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3399 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3400 if (rcStrict != VINF_SUCCESS)
3401 return rcStrict;
3402 if (f32BitGate)
3403 {
3404 if (fFlags & IEM_XCPT_FLAGS_ERR)
3405 *uStackFrame.pu32++ = uErr;
3406 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3407 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3408 uStackFrame.pu32[2] = fEfl;
3409 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3410 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3411 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3412 if (fEfl & X86_EFL_VM)
3413 {
3414 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3415 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3416 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3417 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3418 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3419 }
3420 }
3421 else
3422 {
3423 if (fFlags & IEM_XCPT_FLAGS_ERR)
3424 *uStackFrame.pu16++ = uErr;
3425 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3426 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3427 uStackFrame.pu16[2] = fEfl;
3428 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3429 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3430 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3431 if (fEfl & X86_EFL_VM)
3432 {
3433 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3434 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3435 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3436 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3437 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3438 }
3439 }
3440 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3441 if (rcStrict != VINF_SUCCESS)
3442 return rcStrict;
3443
3444 /* Mark the selectors 'accessed' (hope this is the correct time). */
3445 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3446 * after pushing the stack frame? (Write protect the gdt + stack to
3447 * find out.) */
3448 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3449 {
3450 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3451 if (rcStrict != VINF_SUCCESS)
3452 return rcStrict;
3453 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3454 }
3455
3456 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3457 {
3458 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3459 if (rcStrict != VINF_SUCCESS)
3460 return rcStrict;
3461 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3462 }
3463
3464 /*
3465 * Start committing the register changes (joins with the DPL=CPL branch).
3466 */
3467 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3468 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3469 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3470 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3471 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3472 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3473 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3474 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3475 * SP is loaded).
3476 * Need to check the other combinations too:
3477 * - 16-bit TSS, 32-bit handler
3478 * - 32-bit TSS, 16-bit handler */
3479 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3480 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3481 else
3482 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3483
3484 if (fEfl & X86_EFL_VM)
3485 {
3486 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3487 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3488 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3489 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3490 }
3491 }
3492 /*
3493 * Same privilege, no stack change and smaller stack frame.
3494 */
3495 else
3496 {
3497 uint64_t uNewRsp;
3498 uint8_t bUnmapInfoStackFrame;
3499 RTPTRUNION uStackFrame;
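/* No stack switch here, so only [err,] EIP, CS and EFLAGS are pushed (3-4 entries of 2 or 4 bytes each). */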
3500 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3501 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3502 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3503 if (rcStrict != VINF_SUCCESS)
3504 return rcStrict;
3505
3506 if (f32BitGate)
3507 {
3508 if (fFlags & IEM_XCPT_FLAGS_ERR)
3509 *uStackFrame.pu32++ = uErr;
3510 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3511 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3512 uStackFrame.pu32[2] = fEfl;
3513 }
3514 else
3515 {
3516 if (fFlags & IEM_XCPT_FLAGS_ERR)
3517 *uStackFrame.pu16++ = uErr;
3518 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3519 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3520 uStackFrame.pu16[2] = fEfl;
3521 }
3522 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3523 if (rcStrict != VINF_SUCCESS)
3524 return rcStrict;
3525
3526 /* Mark the CS selector as 'accessed'. */
3527 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3528 {
3529 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3530 if (rcStrict != VINF_SUCCESS)
3531 return rcStrict;
3532 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3533 }
3534
3535 /*
3536 * Start committing the register changes (joins with the other branch).
3537 */
3538 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3539 }
3540
3541 /* ... register committing continues. */
3542 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3543 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3544 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3545 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3546 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3547 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3548
3549 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3550 fEfl &= ~fEflToClear;
3551 IEMMISC_SET_EFL(pVCpu, fEfl);
3552
3553 if (fFlags & IEM_XCPT_FLAGS_CR2)
3554 pVCpu->cpum.GstCtx.cr2 = uCr2;
3555
3556 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3557 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3558
3559 /* Make sure the execution flags are correct. */
3560 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3561 if (fExecNew != pVCpu->iem.s.fExec)
3562 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3563 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3564 pVCpu->iem.s.fExec = fExecNew;
3565 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3566
3567 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3568}
3569
3570
3571/**
3572 * Implements exceptions and interrupts for long mode.
3573 *
3574 * @returns VBox strict status code.
3575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3576 * @param cbInstr The number of bytes to offset rIP by in the return
3577 * address.
3578 * @param u8Vector The interrupt / exception vector number.
3579 * @param fFlags The flags.
3580 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3581 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3582 */
3583static VBOXSTRICTRC
3584iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3585 uint8_t cbInstr,
3586 uint8_t u8Vector,
3587 uint32_t fFlags,
3588 uint16_t uErr,
3589 uint64_t uCr2) RT_NOEXCEPT
3590{
3591 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3592
3593 /*
3594 * Read the IDT entry.
3595 */
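/* Long mode IDT entries are 16 bytes each (hence the shift by 4); the two
   halves of the descriptor are fetched as separate qwords below. */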
3596 uint16_t offIdt = (uint16_t)u8Vector << 4;
3597 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3598 {
3599 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3600 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3601 }
3602 X86DESC64 Idte;
3603#ifdef _MSC_VER /* Shut up silly compiler warning. */
3604 Idte.au64[0] = 0;
3605 Idte.au64[1] = 0;
3606#endif
3607 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3608 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3609 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3610 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3611 {
3612 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3613 return rcStrict;
3614 }
3615 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3616 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3617 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3618
3619 /*
3620 * Check the descriptor type, DPL and such.
3621 * ASSUMES this is done in the same order as described for call-gate calls.
3622 */
3623 if (Idte.Gate.u1DescType)
3624 {
3625 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3626 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3627 }
3628 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3629 switch (Idte.Gate.u4Type)
3630 {
3631 case AMD64_SEL_TYPE_SYS_INT_GATE:
3632 fEflToClear |= X86_EFL_IF;
3633 break;
3634 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3635 break;
3636
3637 default:
3638 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3639 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3640 }
3641
3642 /* Check DPL against CPL if applicable. */
3643 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3644 {
3645 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3646 {
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3648 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3649 }
3650 }
3651
3652 /* Is it there? */
3653 if (!Idte.Gate.u1Present)
3654 {
3655 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3656 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3657 }
3658
3659 /* A null CS is bad. */
3660 RTSEL NewCS = Idte.Gate.u16Sel;
3661 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3662 {
3663 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3664 return iemRaiseGeneralProtectionFault0(pVCpu);
3665 }
3666
3667 /* Fetch the descriptor for the new CS. */
3668 IEMSELDESC DescCS;
3669 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3670 if (rcStrict != VINF_SUCCESS)
3671 {
3672 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3673 return rcStrict;
3674 }
3675
3676 /* Must be a 64-bit code segment. */
3677 if (!DescCS.Long.Gen.u1DescType)
3678 {
3679 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3680 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3681 }
3682 if ( !DescCS.Long.Gen.u1Long
3683 || DescCS.Long.Gen.u1DefBig
3684 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3685 {
3686 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3687 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3688 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3689 }
3690
3691 /* Don't allow lowering the privilege level. For non-conforming CS
3692 selectors, the CS.DPL sets the privilege level the trap/interrupt
3693 handler runs at. For conforming CS selectors, the CPL remains
3694 unchanged, but the CS.DPL must be <= CPL. */
3695 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3696 * when CPU in Ring-0. Result \#GP? */
3697 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3698 {
3699 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3700 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3701 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3702 }
3703
3704
3705 /* Make sure the selector is present. */
3706 if (!DescCS.Legacy.Gen.u1Present)
3707 {
3708 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3709 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3710 }
3711
3712 /* Check that the new RIP is canonical. */
3713 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3714 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3715 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3716 if (!IEM_IS_CANONICAL(uNewRip))
3717 {
3718 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3719 return iemRaiseGeneralProtectionFault0(pVCpu);
3720 }
3721
3722 /*
3723 * If the privilege level changes or if the IST isn't zero, we need to get
3724 * a new stack from the TSS.
3725 */
3726 uint64_t uNewRsp;
3727 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3728 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3729 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3730 || Idte.Gate.u3IST != 0)
3731 {
3732 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3733 if (rcStrict != VINF_SUCCESS)
3734 return rcStrict;
3735 }
3736 else
3737 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3738 uNewRsp &= ~(uint64_t)0xf;
3739
3740 /*
3741 * Calc the flag image to push.
3742 */
3743 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3744 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3745 fEfl &= ~X86_EFL_RF;
3746 else
3747 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3748
3749 /*
3750 * Start making changes.
3751 */
3752 /* Set the new CPL so that stack accesses use it. */
3753 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3754 IEM_SET_CPL(pVCpu, uNewCpl);
3755/** @todo Setting CPL this early seems wrong as it would affect any errors we
3756 * raise accessing the stack and (?) GDT/LDT... */
3757
3758 /* Create the stack frame. */
3759 uint8_t bUnmapInfoStackFrame;
3760 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
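/* The 64-bit frame always holds SS, RSP, RFLAGS, CS and RIP, plus the error code when present - all pushed as qwords. */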
3761 RTPTRUNION uStackFrame;
3762 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3763 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3764 if (rcStrict != VINF_SUCCESS)
3765 return rcStrict;
3766
3767 if (fFlags & IEM_XCPT_FLAGS_ERR)
3768 *uStackFrame.pu64++ = uErr;
3769 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3770 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3771 uStackFrame.pu64[2] = fEfl;
3772 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3773 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3774 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3775 if (rcStrict != VINF_SUCCESS)
3776 return rcStrict;
3777
3778 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3779 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3780 * after pushing the stack frame? (Write protect the gdt + stack to
3781 * find out.) */
3782 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3783 {
3784 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3788 }
3789
3790 /*
3791 * Start committing the register changes.
3792 */
3793 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3794 * hidden registers when interrupting 32-bit or 16-bit code! */
3795 if (uNewCpl != uOldCpl)
3796 {
3797 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3798 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3799 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3800 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3801 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3802 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3803 }
3804 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3805 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3806 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3807 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3808 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3809 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3810 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3811 pVCpu->cpum.GstCtx.rip = uNewRip;
3812
3813 fEfl &= ~fEflToClear;
3814 IEMMISC_SET_EFL(pVCpu, fEfl);
3815
3816 if (fFlags & IEM_XCPT_FLAGS_CR2)
3817 pVCpu->cpum.GstCtx.cr2 = uCr2;
3818
3819 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3820 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3821
3822 iemRecalcExecModeAndCplFlags(pVCpu);
3823
3824 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3825}
3826
3827
3828/**
3829 * Implements exceptions and interrupts.
3830 *
3831 * All exceptions and interrupts go through this function!
3832 *
3833 * @returns VBox strict status code.
3834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3835 * @param cbInstr The number of bytes to offset rIP by in the return
3836 * address.
3837 * @param u8Vector The interrupt / exception vector number.
3838 * @param fFlags The flags.
3839 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3840 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3841 */
3842VBOXSTRICTRC
3843iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3844 uint8_t cbInstr,
3845 uint8_t u8Vector,
3846 uint32_t fFlags,
3847 uint16_t uErr,
3848 uint64_t uCr2) RT_NOEXCEPT
3849{
3850 /*
3851 * Get all the state that we might need here.
3852 */
3853 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3854 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3855
3856#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3857 /*
3858 * Flush prefetch buffer
3859 */
3860 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3861#endif
3862
3863 /*
3864 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3865 */
3866 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3867 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3868 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3869 | IEM_XCPT_FLAGS_BP_INSTR
3870 | IEM_XCPT_FLAGS_ICEBP_INSTR
3871 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3872 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3873 {
3874 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3875 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3876 u8Vector = X86_XCPT_GP;
3877 uErr = 0;
3878 }
3879
3880 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3881#ifdef DBGFTRACE_ENABLED
3882 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3883 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3884 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3885#endif
3886
3887 /*
3888 * Check if DBGF wants to intercept the exception.
3889 */
3890 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3891 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3892 { /* likely */ }
3893 else
3894 {
3895 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3896 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3897 if (rcStrict != VINF_SUCCESS)
3898 return rcStrict;
3899 }
3900
3901 /*
3902 * Evaluate whether NMI blocking should be in effect.
3903 * Normally, NMI blocking is in effect whenever we inject an NMI.
3904 */
3905 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3906 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3907
3908#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3909 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3910 {
3911 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3912 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3913 return rcStrict0;
3914
3915 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3916 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3917 {
3918 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3919 fBlockNmi = false;
3920 }
3921 }
3922#endif
3923
3924#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3925 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3926 {
3927 /*
3928 * If the event is being injected as part of VMRUN, it isn't subject to event
3929 * intercepts in the nested-guest. However, secondary exceptions that occur
3930 * during injection of any event -are- subject to exception intercepts.
3931 *
3932 * See AMD spec. 15.20 "Event Injection".
3933 */
3934 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3935 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3936 else
3937 {
3938 /*
3939 * Check and handle if the event being raised is intercepted.
3940 */
3941 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3942 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3943 return rcStrict0;
3944 }
3945 }
3946#endif
3947
3948 /*
3949 * Set NMI blocking if necessary.
3950 */
3951 if (fBlockNmi)
3952 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3953
3954 /*
3955 * Do recursion accounting.
3956 */
3957 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3958 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3959 if (pVCpu->iem.s.cXcptRecursions == 0)
3960 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3961 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3962 else
3963 {
3964 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3965 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3966 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3967
3968 if (pVCpu->iem.s.cXcptRecursions >= 4)
3969 {
3970#ifdef DEBUG_bird
3971 AssertFailed();
3972#endif
3973 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3974 }
3975
3976 /*
3977 * Evaluate the sequence of recurring events.
3978 */
3979 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3980 NULL /* pXcptRaiseInfo */);
3981 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3982 { /* likely */ }
3983 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3984 {
3985 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3986 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3987 u8Vector = X86_XCPT_DF;
3988 uErr = 0;
3989#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3990 /* VMX nested-guest #DF intercept needs to be checked here. */
3991 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3992 {
3993 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3994 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3995 return rcStrict0;
3996 }
3997#endif
3998 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3999 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4000 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4001 }
4002 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4003 {
4004 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4005 return iemInitiateCpuShutdown(pVCpu);
4006 }
4007 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4008 {
4009 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4010 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4011 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4012 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4013 return VERR_EM_GUEST_CPU_HANG;
4014 }
4015 else
4016 {
4017 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4018 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4019 return VERR_IEM_IPE_9;
4020 }
4021
4022 /*
4023 * The 'EXT' bit is set when an exception occurs during delivery of an external
4024 * event (such as an interrupt or an earlier exception)[1]. A privileged software
4025 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
4026 * software interrupts and the INTO and INT3 instructions, the EXT bit is not set[3].
4027 *
4028 * [1] - Intel spec. 6.13 "Error Code"
4029 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4030 * [3] - Intel Instruction reference for INT n.
4031 */
4032 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4033 && (fFlags & IEM_XCPT_FLAGS_ERR)
4034 && u8Vector != X86_XCPT_PF
4035 && u8Vector != X86_XCPT_DF)
4036 {
4037 uErr |= X86_TRAP_ERR_EXTERNAL;
4038 }
4039 }
4040
4041 pVCpu->iem.s.cXcptRecursions++;
4042 pVCpu->iem.s.uCurXcpt = u8Vector;
4043 pVCpu->iem.s.fCurXcpt = fFlags;
4044 pVCpu->iem.s.uCurXcptErr = uErr;
4045 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4046
4047 /*
4048 * Extensive logging.
4049 */
4050#if defined(LOG_ENABLED) && defined(IN_RING3)
4051 if (LogIs3Enabled())
4052 {
4053 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4054 char szRegs[4096];
4055 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4056 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4057 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4058 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4059 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4060 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4061 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4062 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4063 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4064 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4065 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4066 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4067 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4068 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4069 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4070 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4071 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4072 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4073 " efer=%016VR{efer}\n"
4074 " pat=%016VR{pat}\n"
4075 " sf_mask=%016VR{sf_mask}\n"
4076 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4077 " lstar=%016VR{lstar}\n"
4078 " star=%016VR{star} cstar=%016VR{cstar}\n"
4079 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4080 );
4081
4082 char szInstr[256];
4083 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4084 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4085 szInstr, sizeof(szInstr), NULL);
4086 Log3(("%s%s\n", szRegs, szInstr));
4087 }
4088#endif /* LOG_ENABLED */
4089
4090 /*
4091 * Stats.
4092 */
4093 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4094 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4095 else if (u8Vector <= X86_XCPT_LAST)
4096 {
4097 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4098 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4099 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4100 }
4101
4102 /*
4103 * A #PF implies an INVLPG of the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4104 * to ensure that a stale TLB or paging cache entry will only cause one
4105 * spurious #PF.
4106 */
4107 if ( u8Vector == X86_XCPT_PF
4108 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4109 IEMTlbInvalidatePage(pVCpu, uCr2);
4110
4111 /*
4112 * Call the mode specific worker function.
4113 */
4114 VBOXSTRICTRC rcStrict;
4115 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4116 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4117 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4118 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4119 else
4120 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4121
4122 /* Flush the prefetch buffer. */
4123 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4124
4125 /*
4126 * Unwind.
4127 */
4128 pVCpu->iem.s.cXcptRecursions--;
4129 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4130 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4131 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4132 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4133 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4134 return rcStrict;
4135}
4136
4137#ifdef IEM_WITH_SETJMP
4138/**
4139 * See iemRaiseXcptOrInt. Will not return.
4140 */
4141DECL_NO_RETURN(void)
4142iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4143 uint8_t cbInstr,
4144 uint8_t u8Vector,
4145 uint32_t fFlags,
4146 uint16_t uErr,
4147 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4148{
4149 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4150 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4151}
4152#endif
4153
4154
4155/** \#DE - 00. */
4156VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4157{
4158 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4159}
4160
4161
4162/** \#DB - 01.
4163 * @note This automatically clears DR7.GD. */
4164VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4165{
4166 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4167 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4168 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4169}
4170
4171
4172/** \#BR - 05. */
4173VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4174{
4175 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4176}
4177
4178
4179/** \#UD - 06. */
4180VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4181{
4182 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4183}
4184
4185
4186/** \#NM - 07. */
4187VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4188{
4189 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4190}
4191
4192
4193/** \#TS(err) - 0a. */
4194VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4195{
4196 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4197}
4198
4199
4200/** \#TS(tr) - 0a. */
4201VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4202{
4203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4204 pVCpu->cpum.GstCtx.tr.Sel, 0);
4205}
4206
4207
4208/** \#TS(0) - 0a. */
4209VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4210{
4211 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4212 0, 0);
4213}
4214
4215
4216/** \#TS(err) - 0a. */
4217VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4218{
4219 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4220 uSel & X86_SEL_MASK_OFF_RPL, 0);
4221}
4222
4223
4224/** \#NP(err) - 0b. */
4225VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4226{
4227 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4228}
4229
4230
4231/** \#NP(sel) - 0b. */
4232VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4233{
4234 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4235 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4236 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4237 uSel & ~X86_SEL_RPL, 0);
4238}
4239
4240
4241/** \#SS(seg) - 0c. */
4242VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4243{
4244 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4245 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4246 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4247 uSel & ~X86_SEL_RPL, 0);
4248}
4249
4250
4251/** \#SS(err) - 0c. */
4252VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4253{
4254 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4255 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4256 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4257}
4258
4259
4260/** \#GP(n) - 0d. */
4261VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4262{
4263 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4264 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4265}
4266
4267
4268/** \#GP(0) - 0d. */
4269VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4270{
4271 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4272 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4273}
4274
4275#ifdef IEM_WITH_SETJMP
4276/** \#GP(0) - 0d. */
4277DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4278{
4279 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4280 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4281}
4282#endif
4283
4284
4285/** \#GP(sel) - 0d. */
4286VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4287{
4288 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4289 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4290 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4291 Sel & ~X86_SEL_RPL, 0);
4292}
4293
4294
4295/** \#GP(0) - 0d. */
4296VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4297{
4298 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4299 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4300}
4301
4302
4303/** \#GP(sel) - 0d. */
4304VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4305{
4306 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4307 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4308 NOREF(iSegReg); NOREF(fAccess);
4309 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4310 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4311}
4312
4313#ifdef IEM_WITH_SETJMP
4314/** \#GP(sel) - 0d, longjmp. */
4315DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4316{
4317 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4318 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4319 NOREF(iSegReg); NOREF(fAccess);
4320 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4321 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4322}
4323#endif
4324
4325/** \#GP(sel) - 0d. */
4326VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4327{
4328 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4329 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4330 NOREF(Sel);
4331 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4332}
4333
4334#ifdef IEM_WITH_SETJMP
4335/** \#GP(sel) - 0d, longjmp. */
4336DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4337{
4338 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4339 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4340 NOREF(Sel);
4341 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4342}
4343#endif
4344
4345
4346/** \#GP(sel) - 0d. */
4347VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4348{
4349 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4350 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4351 NOREF(iSegReg); NOREF(fAccess);
4352 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4353}
4354
4355#ifdef IEM_WITH_SETJMP
4356/** \#GP(sel) - 0d, longjmp. */
4357DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4358{
4359 NOREF(iSegReg); NOREF(fAccess);
4360 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4361}
4362#endif
4363
4364
4365/** \#PF(n) - 0e. */
4366VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4367{
4368 uint16_t uErr;
4369 switch (rc)
4370 {
4371 case VERR_PAGE_NOT_PRESENT:
4372 case VERR_PAGE_TABLE_NOT_PRESENT:
4373 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4374 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4375 uErr = 0;
4376 break;
4377
4378 default:
4379 AssertMsgFailed(("%Rrc\n", rc));
4380 RT_FALL_THRU();
4381 case VERR_ACCESS_DENIED:
4382 uErr = X86_TRAP_PF_P;
4383 break;
4384
4385 /** @todo reserved */
4386 }
4387
4388 if (IEM_GET_CPL(pVCpu) == 3)
4389 uErr |= X86_TRAP_PF_US;
4390
4391 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4392 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4393 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4394 uErr |= X86_TRAP_PF_ID;
4395
4396#if 0 /* This is so much non-sense, really. Why was it done like that? */
4397 /* Note! RW access callers reporting a WRITE protection fault will clear
4398 the READ flag before calling. So, read-modify-write accesses (RW)
4399 can safely be reported as READ faults. */
4400 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4401 uErr |= X86_TRAP_PF_RW;
4402#else
4403 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4404 {
4405 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4406 /// (regardless of outcome of the comparison in the latter case).
4407 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4408 uErr |= X86_TRAP_PF_RW;
4409 }
4410#endif
4411
4412 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4413 of the memory operand rather than at the start of it. (Not sure what
4414 happens if it crosses a page boundary.) The current heuristic for
4415 this is to report the #PF for the last byte if the access is more than
4416 64 bytes. This is probably not correct, but we can work that out later;
4417 the main objective now is to get FXSAVE to work like on real hardware and
4418 make bs3-cpu-basic2 work. */
4419 if (cbAccess <= 64)
4420 { /* likely */ }
4421 else
4422 GCPtrWhere += cbAccess - 1;
4423
4424 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4425 uErr, GCPtrWhere);
4426}
4427
4428#ifdef IEM_WITH_SETJMP
4429/** \#PF(n) - 0e, longjmp. */
4430DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4431 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4432{
4433 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4434}
4435#endif
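
#if 0 /* Illustrative sketch, not part of the build: how the #PF error code above is assembled. */
/*
 * Minimal, self-contained restatement of the error-code logic in
 * iemRaisePageFault using the architectural bit positions (P=bit 0,
 * W/R=bit 1, U/S=bit 2, I/D=bit 4).  The helper name and parameters are
 * illustrative assumptions; the real code works from the IEM access flags
 * and the X86_TRAP_PF_* constants instead.
 */
# include <stdint.h>
# include <stdbool.h>

static uint16_t examplePageFaultErrCode(bool fProtViolation /* page present, access denied */,
                                        bool fWrite, bool fUserMode /* CPL == 3 */,
                                        bool fInstrFetch, bool fNxActive /* PAE/LM + EFER.NXE */)
{
    uint16_t uErr = fProtViolation ? UINT16_C(0x0001) : 0;  /* P   */
    if (fWrite)
        uErr |= UINT16_C(0x0002);                           /* W/R */
    if (fUserMode)
        uErr |= UINT16_C(0x0004);                           /* U/S */
    if (fInstrFetch && fNxActive)
        uErr |= UINT16_C(0x0010);                           /* I/D */
    return uErr;
}
#endif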
4436
4437
4438/** \#MF(0) - 10. */
4439VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4440{
4441 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4442 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4443
4444 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4445 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4446 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4447}
4448
4449
4450/** \#AC(0) - 11. */
4451VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4452{
4453 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4454}
4455
4456#ifdef IEM_WITH_SETJMP
4457/** \#AC(0) - 11, longjmp. */
4458DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4459{
4460 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4461}
4462#endif
4463
4464
4465/** \#XF(0)/\#XM(0) - 19. */
4466VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4467{
4468 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4469}
4470
4471
4472/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4473IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4474{
4475 NOREF(cbInstr);
4476 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4477}
4478
4479
4480/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4481IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4482{
4483 NOREF(cbInstr);
4484 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4485}
4486
4487
4488/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4489IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4490{
4491 NOREF(cbInstr);
4492 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4493}
4494
4495
4496/** @} */
4497
4498/** @name Common opcode decoders.
4499 * @{
4500 */
4501//#include <iprt/mem.h>
4502
4503/**
4504 * Used to add extra details about a stub case.
4505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4506 */
4507void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4508{
4509#if defined(LOG_ENABLED) && defined(IN_RING3)
4510 PVM pVM = pVCpu->CTX_SUFF(pVM);
4511 char szRegs[4096];
4512 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4513 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4514 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4515 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4516 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4517 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4518 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4519 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4520 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4521 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4522 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4523 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4524 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4525 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4526 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4527 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4528 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4529 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4530 " efer=%016VR{efer}\n"
4531 " pat=%016VR{pat}\n"
4532 " sf_mask=%016VR{sf_mask}\n"
4533 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4534 " lstar=%016VR{lstar}\n"
4535 " star=%016VR{star} cstar=%016VR{cstar}\n"
4536 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4537 );
4538
4539 char szInstr[256];
4540 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4541 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4542 szInstr, sizeof(szInstr), NULL);
4543
4544 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4545#else
4546 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4547#endif
4548}
4549
4550/** @} */
4551
4552
4553
4554/** @name Register Access.
4555 * @{
4556 */
4557
4558/**
4559 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4560 *
4561 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4562 * segment limit.
4563 *
4564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4565 * @param cbInstr Instruction size.
4566 * @param offNextInstr The offset of the next instruction.
4567 * @param enmEffOpSize Effective operand size.
4568 */
4569VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4570 IEMMODE enmEffOpSize) RT_NOEXCEPT
4571{
4572 switch (enmEffOpSize)
4573 {
4574 case IEMMODE_16BIT:
4575 {
4576 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4577 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4578 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4579 pVCpu->cpum.GstCtx.rip = uNewIp;
4580 else
4581 return iemRaiseGeneralProtectionFault0(pVCpu);
4582 break;
4583 }
4584
4585 case IEMMODE_32BIT:
4586 {
4587 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4588 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4589
4590 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4591 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4592 pVCpu->cpum.GstCtx.rip = uNewEip;
4593 else
4594 return iemRaiseGeneralProtectionFault0(pVCpu);
4595 break;
4596 }
4597
4598 case IEMMODE_64BIT:
4599 {
4600 Assert(IEM_IS_64BIT_CODE(pVCpu));
4601
4602 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4603 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4604 pVCpu->cpum.GstCtx.rip = uNewRip;
4605 else
4606 return iemRaiseGeneralProtectionFault0(pVCpu);
4607 break;
4608 }
4609
4610 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4611 }
4612
4613#ifndef IEM_WITH_CODE_TLB
4614 /* Flush the prefetch buffer. */
4615 pVCpu->iem.s.cbOpcode = cbInstr;
4616#endif
4617
4618 /*
4619 * Clear RF and finish the instruction (maybe raise #DB).
4620 */
4621 return iemRegFinishClearingRF(pVCpu);
4622}
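
#if 0 /* Illustrative sketch, not part of the build: the 16-bit branch of the jump above. */
/*
 * The 16-bit target is computed in a uint16_t, so it wraps modulo 64K, and is
 * then checked against the CS limit (64-bit code skips that check).  The
 * helper below is a self-contained, assumption-named restatement of just that
 * branch; it returns false where the real code would raise #GP(0).
 */
# include <stdint.h>
# include <stdbool.h>

static bool exampleRel8Jump16(uint16_t *puIp, uint8_t cbInstr, int8_t offNext, uint32_t cbCsLimit)
{
    uint16_t const uNewIp = (uint16_t)(*puIp + cbInstr + (int16_t)offNext); /* wraps at 64K */
    if (uNewIp > cbCsLimit)
        return false;                       /* outside the code segment limit -> #GP(0) */
    *puIp = uNewIp;
    return true;
}
#endif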
4623
4624
4625/**
4626 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4627 *
4628 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4629 * segment limit.
4630 *
4631 * @returns Strict VBox status code.
4632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4633 * @param cbInstr Instruction size.
4634 * @param offNextInstr The offset of the next instruction.
4635 */
4636VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4637{
4638 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4639
4640 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4641 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4642 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4643 pVCpu->cpum.GstCtx.rip = uNewIp;
4644 else
4645 return iemRaiseGeneralProtectionFault0(pVCpu);
4646
4647#ifndef IEM_WITH_CODE_TLB
4648 /* Flush the prefetch buffer. */
4649 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4650#endif
4651
4652 /*
4653 * Clear RF and finish the instruction (maybe raise #DB).
4654 */
4655 return iemRegFinishClearingRF(pVCpu);
4656}
4657
4658
4659/**
4660 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4661 *
4662 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4663 * segment limit.
4664 *
4665 * @returns Strict VBox status code.
4666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4667 * @param cbInstr Instruction size.
4668 * @param offNextInstr The offset of the next instruction.
4669 * @param enmEffOpSize Effective operand size.
4670 */
4671VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4672 IEMMODE enmEffOpSize) RT_NOEXCEPT
4673{
4674 if (enmEffOpSize == IEMMODE_32BIT)
4675 {
4676 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4677
4678 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4679 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4680 pVCpu->cpum.GstCtx.rip = uNewEip;
4681 else
4682 return iemRaiseGeneralProtectionFault0(pVCpu);
4683 }
4684 else
4685 {
4686 Assert(enmEffOpSize == IEMMODE_64BIT);
4687
4688 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4689 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4690 pVCpu->cpum.GstCtx.rip = uNewRip;
4691 else
4692 return iemRaiseGeneralProtectionFault0(pVCpu);
4693 }
4694
4695#ifndef IEM_WITH_CODE_TLB
4696 /* Flush the prefetch buffer. */
4697 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4698#endif
4699
4700 /*
4701 * Clear RF and finish the instruction (maybe raise #DB).
4702 */
4703 return iemRegFinishClearingRF(pVCpu);
4704}
4705
4706
4707/**
4708 * Performs a near jump to the specified address.
4709 *
4710 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4711 *
4712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4713 * @param uNewIp The new IP value.
4714 */
4715VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4716{
4717 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4718 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4719 pVCpu->cpum.GstCtx.rip = uNewIp;
4720 else
4721 return iemRaiseGeneralProtectionFault0(pVCpu);
4722 /** @todo Test 16-bit jump in 64-bit mode. */
4723
4724#ifndef IEM_WITH_CODE_TLB
4725 /* Flush the prefetch buffer. */
4726 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4727#endif
4728
4729 /*
4730 * Clear RF and finish the instruction (maybe raise #DB).
4731 */
4732 return iemRegFinishClearingRF(pVCpu);
4733}
4734
4735
4736/**
4737 * Performs a near jump to the specified address.
4738 *
4739 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4740 *
4741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4742 * @param uNewEip The new EIP value.
4743 */
4744VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4745{
4746 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4747 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4748
4749 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4750 pVCpu->cpum.GstCtx.rip = uNewEip;
4751 else
4752 return iemRaiseGeneralProtectionFault0(pVCpu);
4753
4754#ifndef IEM_WITH_CODE_TLB
4755 /* Flush the prefetch buffer. */
4756 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4757#endif
4758
4759 /*
4760 * Clear RF and finish the instruction (maybe raise #DB).
4761 */
4762 return iemRegFinishClearingRF(pVCpu);
4763}
4764
4765
4766/**
4767 * Performs a near jump to the specified address.
4768 *
4769 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4770 * segment limit.
4771 *
4772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4773 * @param uNewRip The new RIP value.
4774 */
4775VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4776{
4777 Assert(IEM_IS_64BIT_CODE(pVCpu));
4778
4779 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4780 pVCpu->cpum.GstCtx.rip = uNewRip;
4781 else
4782 return iemRaiseGeneralProtectionFault0(pVCpu);
4783
4784#ifndef IEM_WITH_CODE_TLB
4785 /* Flush the prefetch buffer. */
4786 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4787#endif
4788
4789 /*
4790 * Clear RF and finish the instruction (maybe raise #DB).
4791 */
4792 return iemRegFinishClearingRF(pVCpu);
4793}
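
#if 0 /* Illustrative sketch, not part of the build: the canonical test used by the jump above. */
/*
 * With 48-bit linear addresses, bits 63:47 must all be copies of bit 47.
 * This restates what IEM_IS_CANONICAL checks; the helper name and the
 * add-and-compare trick are illustrative assumptions.
 */
# include <stdint.h>
# include <stdbool.h>

static bool exampleIsCanonical(uint64_t uAddr)
{
    /* Shifts the valid ranges [0, 2^47) and [2^64 - 2^47, 2^64) into one contiguous range. */
    return uAddr + UINT64_C(0x0000800000000000) < UINT64_C(0x0001000000000000);
}
#endif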
4794
4795/** @} */
4796
4797
4798/** @name FPU access and helpers.
4799 *
4800 * @{
4801 */
4802
4803/**
4804 * Updates the x87.DS and FPUDP registers.
4805 *
4806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4807 * @param pFpuCtx The FPU context.
4808 * @param iEffSeg The effective segment register.
4809 * @param GCPtrEff The effective address relative to @a iEffSeg.
4810 */
4811DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4812{
4813 RTSEL sel;
4814 switch (iEffSeg)
4815 {
4816 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4817 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4818 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4819 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4820 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4821 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4822 default:
4823 AssertMsgFailed(("%d\n", iEffSeg));
4824 sel = pVCpu->cpum.GstCtx.ds.Sel;
4825 }
4826 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4827 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4828 {
4829 pFpuCtx->DS = 0;
4830 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4831 }
4832 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4833 {
4834 pFpuCtx->DS = sel;
4835 pFpuCtx->FPUDP = GCPtrEff;
4836 }
4837 else
4838 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4839}
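
#if 0 /* Illustrative sketch, not part of the build: the real/V86-mode FPUDP case above. */
/*
 * In real and V86 mode the DS field is left zero and FPUDP gets the 32-bit
 * linear address formed from the 16-bit selector and the effective offset.
 * The helper name is an illustrative assumption.
 */
# include <stdint.h>

static uint32_t exampleRealModeFpuDp(uint16_t uSel, uint32_t offEff)
{
    return offEff + ((uint32_t)uSel << 4);  /* segment * 16 + offset */
}
#endif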
4840
4841
4842/**
4843 * Rotates the stack registers in the push direction.
4844 *
4845 * @param pFpuCtx The FPU context.
4846 * @remarks This is a complete waste of time, but fxsave stores the registers in
4847 * stack order.
4848 */
4849DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4850{
4851 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4852 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4853 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4854 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4855 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4856 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4857 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4858 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4859 pFpuCtx->aRegs[0].r80 = r80Tmp;
4860}
4861
4862
4863/**
4864 * Rotates the stack registers in the pop direction.
4865 *
4866 * @param pFpuCtx The FPU context.
4867 * @remarks This is a complete waste of time, but fxsave stores the registers in
4868 * stack order.
4869 */
4870DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4871{
4872 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4873 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4874 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4875 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4876 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4877 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4878 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4879 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4880 pFpuCtx->aRegs[7].r80 = r80Tmp;
4881}
4882
4883
4884/**
4885 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4886 * exception prevents it.
4887 *
4888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4889 * @param pResult The FPU operation result to push.
4890 * @param pFpuCtx The FPU context.
4891 */
4892static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4893{
4894 /* Update FSW and bail if there are pending exceptions afterwards. */
4895 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4896 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4897 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4898 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4899 {
4900 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4901 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4902 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4903 pFpuCtx->FSW = fFsw;
4904 return;
4905 }
4906
4907 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4908 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4909 {
4910 /* All is fine, push the actual value. */
4911 pFpuCtx->FTW |= RT_BIT(iNewTop);
4912 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4913 }
4914 else if (pFpuCtx->FCW & X86_FCW_IM)
4915 {
4916 /* Masked stack overflow, push QNaN. */
4917 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4918 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4919 }
4920 else
4921 {
4922 /* Raise stack overflow, don't push anything. */
4923 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4924 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4925 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4926 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4927 return;
4928 }
4929
4930 fFsw &= ~X86_FSW_TOP_MASK;
4931 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4932 pFpuCtx->FSW = fFsw;
4933
4934 iemFpuRotateStackPush(pFpuCtx);
4935 RT_NOREF(pVCpu);
4936}
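
#if 0 /* Illustrative sketch, not part of the build: the two checks driving the push above. */
/*
 * First check: an unmasked IE/ZE/DE exception is pending when the FSW flag is
 * set while the matching FCW mask bit is clear.  Second: a push decrements
 * TOP modulo 8, which the code writes as (TOP + 7) & 7.  Bit layouts follow
 * the x87 FSW/FCW; the helper names are assumptions.
 */
# include <stdint.h>
# include <stdbool.h>

static bool exampleHasUnmaskedPendingXcpt(uint16_t fFsw, uint16_t fFcw)
{
    uint16_t const fIeDeZe = UINT16_C(0x0007);  /* IE=bit 0, DE=bit 1, ZE=bit 2; IM/DM/ZM sit at the same FCW bits */
    return ((fFsw & fIeDeZe) & ~(fFcw & fIeDeZe)) != 0;
}

static uint8_t exampleNewTopForPush(uint16_t fFsw)
{
    uint8_t const iTop = (uint8_t)((fFsw >> 11) & 7);   /* TOP lives in FSW bits 13:11 */
    return (uint8_t)((iTop + 7) & 7);                    /* push: TOP - 1 (mod 8) */
}
#endif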
4937
4938
4939/**
4940 * Stores a result in a FPU register and updates the FSW and FTW.
4941 *
4942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4943 * @param pFpuCtx The FPU context.
4944 * @param pResult The result to store.
4945 * @param iStReg Which FPU register to store it in.
4946 */
4947static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4948{
4949 Assert(iStReg < 8);
4950 uint16_t fNewFsw = pFpuCtx->FSW;
4951 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4952 fNewFsw &= ~X86_FSW_C_MASK;
4953 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4954 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4955 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4956 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4957 pFpuCtx->FSW = fNewFsw;
4958 pFpuCtx->FTW |= RT_BIT(iReg);
4959 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4960 RT_NOREF(pVCpu);
4961}
4962
4963
4964/**
4965 * Only updates the FPU status word (FSW) with the result of the current
4966 * instruction.
4967 *
4968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4969 * @param pFpuCtx The FPU context.
4970 * @param u16FSW The FSW output of the current instruction.
4971 */
4972static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4973{
4974 uint16_t fNewFsw = pFpuCtx->FSW;
4975 fNewFsw &= ~X86_FSW_C_MASK;
4976 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4977 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4978 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4979 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4980 pFpuCtx->FSW = fNewFsw;
4981 RT_NOREF(pVCpu);
4982}
4983
4984
4985/**
4986 * Pops one item off the FPU stack if no pending exception prevents it.
4987 *
4988 * @param pFpuCtx The FPU context.
4989 */
4990static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4991{
4992 /* Check pending exceptions. */
4993 uint16_t uFSW = pFpuCtx->FSW;
4994 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4995 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4996 return;
4997
4998 /* TOP++ (popping increments TOP; the +9 below is +1 modulo 8). */
4999 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5000 uFSW &= ~X86_FSW_TOP_MASK;
5001 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5002 pFpuCtx->FSW = uFSW;
5003
5004 /* Mark the previous ST0 as empty. */
5005 iOldTop >>= X86_FSW_TOP_SHIFT;
5006 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5007
5008 /* Rotate the registers. */
5009 iemFpuRotateStackPop(pFpuCtx);
5010}
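
#if 0 /* Illustrative sketch, not part of the build: the pop above in isolation. */
/*
 * The register that was ST(0) gets its FTW bit cleared (marked empty) and TOP
 * is incremented modulo 8.  The physical register rotation done for fxsave
 * ordering is left out.  Names and types are illustrative assumptions.
 */
# include <stdint.h>

static void examplePopOne(uint16_t *pfFsw, uint8_t *pfFtw)
{
    uint8_t const iOldTop = (uint8_t)((*pfFsw >> 11) & 7);                       /* current TOP = ST(0) */
    *pfFtw = (uint8_t)(*pfFtw & ~(1 << iOldTop));                                /* mark it empty */
    *pfFsw = (uint16_t)((*pfFsw & ~(7 << 11)) | (((iOldTop + 1) & 7) << 11));    /* TOP++ */
}
#endif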
5011
5012
5013/**
5014 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5015 *
5016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5017 * @param pResult The FPU operation result to push.
5018 * @param uFpuOpcode The FPU opcode value.
5019 */
5020void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5021{
5022 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5023 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5024 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5025}
5026
5027
5028/**
5029 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5030 * and sets FPUDP and FPUDS.
5031 *
5032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5033 * @param pResult The FPU operation result to push.
5034 * @param iEffSeg The effective segment register.
5035 * @param GCPtrEff The effective address relative to @a iEffSeg.
5036 * @param uFpuOpcode The FPU opcode value.
5037 */
5038void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5039 uint16_t uFpuOpcode) RT_NOEXCEPT
5040{
5041 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5042 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5043 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5044 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5045}
5046
5047
5048/**
5049 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5050 * unless a pending exception prevents it.
5051 *
5052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5053 * @param pResult The FPU operation result to store and push.
5054 * @param uFpuOpcode The FPU opcode value.
5055 */
5056void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5057{
5058 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5059 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5060
5061 /* Update FSW and bail if there are pending exceptions afterwards. */
5062 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5063 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5064 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5065 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5066 {
5067 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5068 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5069 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5070 pFpuCtx->FSW = fFsw;
5071 return;
5072 }
5073
5074 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5075 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5076 {
5077 /* All is fine, push the actual value. */
5078 pFpuCtx->FTW |= RT_BIT(iNewTop);
5079 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5080 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5081 }
5082 else if (pFpuCtx->FCW & X86_FCW_IM)
5083 {
5084 /* Masked stack overflow, push QNaN. */
5085 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5086 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5087 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5088 }
5089 else
5090 {
5091 /* Raise stack overflow, don't push anything. */
5092 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5093 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5094 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5095 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5096 return;
5097 }
5098
5099 fFsw &= ~X86_FSW_TOP_MASK;
5100 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5101 pFpuCtx->FSW = fFsw;
5102
5103 iemFpuRotateStackPush(pFpuCtx);
5104}
5105
5106
5107/**
5108 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5109 * FOP.
5110 *
5111 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5112 * @param pResult The result to store.
5113 * @param iStReg Which FPU register to store it in.
5114 * @param uFpuOpcode The FPU opcode value.
5115 */
5116void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5117{
5118 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5119 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5120 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5121}
5122
5123
5124/**
5125 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5126 * FOP, and then pops the stack.
5127 *
5128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5129 * @param pResult The result to store.
5130 * @param iStReg Which FPU register to store it in.
5131 * @param uFpuOpcode The FPU opcode value.
5132 */
5133void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5134{
5135 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5136 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5137 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5138 iemFpuMaybePopOne(pFpuCtx);
5139}
5140
5141
5142/**
5143 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5144 * FPUDP, and FPUDS.
5145 *
5146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5147 * @param pResult The result to store.
5148 * @param iStReg Which FPU register to store it in.
5149 * @param iEffSeg The effective memory operand selector register.
5150 * @param GCPtrEff The effective memory operand offset.
5151 * @param uFpuOpcode The FPU opcode value.
5152 */
5153void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5154 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5155{
5156 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5157 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5158 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5159 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5160}
5161
5162
5163/**
5164 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5165 * FPUDP, and FPUDS, and then pops the stack.
5166 *
5167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5168 * @param pResult The result to store.
5169 * @param iStReg Which FPU register to store it in.
5170 * @param iEffSeg The effective memory operand selector register.
5171 * @param GCPtrEff The effective memory operand offset.
5172 * @param uFpuOpcode The FPU opcode value.
5173 */
5174void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5175 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5176{
5177 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5178 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5179 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5180 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5181 iemFpuMaybePopOne(pFpuCtx);
5182}
5183
5184
5185/**
5186 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5187 *
5188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5189 * @param uFpuOpcode The FPU opcode value.
5190 */
5191void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5192{
5193 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5194 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5195}
5196
5197
5198/**
5199 * Updates the FSW, FOP, FPUIP, and FPUCS.
5200 *
5201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5202 * @param u16FSW The FSW from the current instruction.
5203 * @param uFpuOpcode The FPU opcode value.
5204 */
5205void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5206{
5207 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5208 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5209 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5210}
5211
5212
5213/**
5214 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5215 *
5216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5217 * @param u16FSW The FSW from the current instruction.
5218 * @param uFpuOpcode The FPU opcode value.
5219 */
5220void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5221{
5222 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5223 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5224 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5225 iemFpuMaybePopOne(pFpuCtx);
5226}
5227
5228
5229/**
5230 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5231 *
5232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5233 * @param u16FSW The FSW from the current instruction.
5234 * @param iEffSeg The effective memory operand selector register.
5235 * @param GCPtrEff The effective memory operand offset.
5236 * @param uFpuOpcode The FPU opcode value.
5237 */
5238void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5239{
5240 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5241 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5242 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5243 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5244}
5245
5246
5247/**
5248 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5249 *
5250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5251 * @param u16FSW The FSW from the current instruction.
5252 * @param uFpuOpcode The FPU opcode value.
5253 */
5254void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5255{
5256 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5257 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5258 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5259 iemFpuMaybePopOne(pFpuCtx);
5260 iemFpuMaybePopOne(pFpuCtx);
5261}
5262
5263
5264/**
5265 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5266 *
5267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5268 * @param u16FSW The FSW from the current instruction.
5269 * @param iEffSeg The effective memory operand selector register.
5270 * @param GCPtrEff The effective memory operand offset.
5271 * @param uFpuOpcode The FPU opcode value.
5272 */
5273void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5274 uint16_t uFpuOpcode) RT_NOEXCEPT
5275{
5276 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5277 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5278 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5279 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5280 iemFpuMaybePopOne(pFpuCtx);
5281}
5282
5283
5284/**
5285 * Worker routine for raising an FPU stack underflow exception.
5286 *
5287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5288 * @param pFpuCtx The FPU context.
5289 * @param iStReg The stack register being accessed.
5290 */
5291static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5292{
5293 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5294 if (pFpuCtx->FCW & X86_FCW_IM)
5295 {
5296 /* Masked underflow. */
5297 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5298 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5299 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5300 if (iStReg != UINT8_MAX)
5301 {
5302 pFpuCtx->FTW |= RT_BIT(iReg);
5303 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5304 }
5305 }
5306 else
5307 {
5308 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5309 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5310 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5311 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5312 }
5313 RT_NOREF(pVCpu);
5314}
5315
5316
5317/**
5318 * Raises a FPU stack underflow exception.
5319 *
5320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5321 * @param iStReg The destination register that should be loaded
5322 * with QNaN if \#IS is not masked. Specify
5323 * UINT8_MAX if none (like for fcom).
5324 * @param uFpuOpcode The FPU opcode value.
5325 */
5326void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5327{
5328 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5329 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5330 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5331}
5332
5333
5334void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5335{
5336 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5337 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5338 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5339 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5340}
5341
5342
5343void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5344{
5345 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5346 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5347 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5348 iemFpuMaybePopOne(pFpuCtx);
5349}
5350
5351
5352void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5353 uint16_t uFpuOpcode) RT_NOEXCEPT
5354{
5355 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5356 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5357 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5358 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5359 iemFpuMaybePopOne(pFpuCtx);
5360}
5361
5362
5363void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5364{
5365 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5366 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5367 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5368 iemFpuMaybePopOne(pFpuCtx);
5369 iemFpuMaybePopOne(pFpuCtx);
5370}
5371
5372
5373void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5374{
5375 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5376 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5377
5378 if (pFpuCtx->FCW & X86_FCW_IM)
5379 {
5380 /* Masked underflow - Push QNaN. */
5381 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5382 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5383 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5384 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5385 pFpuCtx->FTW |= RT_BIT(iNewTop);
5386 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5387 iemFpuRotateStackPush(pFpuCtx);
5388 }
5389 else
5390 {
5391 /* Exception pending - don't change TOP or the register stack. */
5392 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5393 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5394 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5395 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5396 }
5397}
5398
5399
5400void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5401{
5402 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5403 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5404
5405 if (pFpuCtx->FCW & X86_FCW_IM)
5406 {
5407 /* Masked underflow - Push QNaN. */
5408 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5409 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5410 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5411 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5412 pFpuCtx->FTW |= RT_BIT(iNewTop);
5413 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5414 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5415 iemFpuRotateStackPush(pFpuCtx);
5416 }
5417 else
5418 {
5419 /* Exception pending - don't change TOP or the register stack. */
5420 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5421 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5422 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5423 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5424 }
5425}
5426
5427
5428/**
5429 * Worker routine for raising an FPU stack overflow exception on a push.
5430 *
5431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5432 * @param pFpuCtx The FPU context.
5433 */
5434static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5435{
5436 if (pFpuCtx->FCW & X86_FCW_IM)
5437 {
5438 /* Masked overflow. */
5439 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5440 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5441 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5442 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5443 pFpuCtx->FTW |= RT_BIT(iNewTop);
5444 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5445 iemFpuRotateStackPush(pFpuCtx);
5446 }
5447 else
5448 {
5449 /* Exception pending - don't change TOP or the register stack. */
5450 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5451 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5452 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5453 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5454 }
5455 RT_NOREF(pVCpu);
5456}
5457
5458
5459/**
5460 * Raises a FPU stack overflow exception on a push.
5461 *
5462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5463 * @param uFpuOpcode The FPU opcode value.
5464 */
5465void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5466{
5467 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5468 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5469 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5470}
5471
5472
5473/**
5474 * Raises a FPU stack overflow exception on a push with a memory operand.
5475 *
5476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5477 * @param iEffSeg The effective memory operand selector register.
5478 * @param GCPtrEff The effective memory operand offset.
5479 * @param uFpuOpcode The FPU opcode value.
5480 */
5481void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5482{
5483 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5484 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5485 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5486 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5487}
5488
5489/** @} */
5490
5491
5492/** @name SSE+AVX SIMD access and helpers.
5493 *
5494 * @{
5495 */
5496/**
5497 * Stores a result in a SIMD XMM register, updates the MXCSR.
5498 *
5499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5500 * @param pResult The result to store.
5501 * @param iXmmReg Which SIMD XMM register to store the result in.
5502 */
5503void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5504{
5505 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5506 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5507
5508 /* The result is only updated if there is no unmasked exception pending. */
5509 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5510 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5511 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5512}
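
#if 0 /* Illustrative sketch, not part of the build: the MXCSR test guarding the store above. */
/*
 * An SSE exception is "unmasked pending" when one of the flag bits (MXCSR
 * bits 5:0) is set while the corresponding mask bit (the same bits shifted
 * left by 7, i.e. MXCSR bits 12:7) is clear.  The helper name is an
 * illustrative assumption.
 */
# include <stdint.h>
# include <stdbool.h>

static bool exampleMxcsrHasUnmaskedXcpt(uint32_t fMxcsr)
{
    uint32_t const fFlags = fMxcsr & UINT32_C(0x003F);          /* IE,DE,ZE,OE,UE,PE */
    uint32_t const fMasks = (fMxcsr >> 7) & UINT32_C(0x003F);   /* IM,DM,ZM,OM,UM,PM */
    return (fFlags & ~fMasks) != 0;
}
#endif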
5513
5514
5515/**
5516 * Updates the MXCSR.
5517 *
5518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5519 * @param fMxcsr The new MXCSR value.
5520 */
5521void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5522{
5523 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5524 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5525}
5526/** @} */
5527
5528
5529/** @name Memory access.
5530 *
5531 * @{
5532 */
5533
5534#undef LOG_GROUP
5535#define LOG_GROUP LOG_GROUP_IEM_MEM
5536
5537/**
5538 * Updates the IEMCPU::cbWritten counter if applicable.
5539 *
5540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5541 * @param fAccess The access being accounted for.
5542 * @param cbMem The access size.
5543 */
5544DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5545{
5546 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5547 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5548 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5549}
5550
5551
5552/**
5553 * Applies the segment limit, base and attributes.
5554 *
5555 * This may raise a \#GP or \#SS.
5556 *
5557 * @returns VBox strict status code.
5558 *
5559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5560 * @param fAccess The kind of access which is being performed.
5561 * @param iSegReg The index of the segment register to apply.
5562 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5563 * TSS, ++).
5564 * @param cbMem The access size.
5565 * @param pGCPtrMem Pointer to the guest memory address to apply
5566 * segmentation to. Input and output parameter.
5567 */
5568VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5569{
5570 if (iSegReg == UINT8_MAX)
5571 return VINF_SUCCESS;
5572
5573 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5574 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5575 switch (IEM_GET_CPU_MODE(pVCpu))
5576 {
5577 case IEMMODE_16BIT:
5578 case IEMMODE_32BIT:
5579 {
5580 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5581 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5582
5583 if ( pSel->Attr.n.u1Present
5584 && !pSel->Attr.n.u1Unusable)
5585 {
5586 Assert(pSel->Attr.n.u1DescType);
5587 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5588 {
5589 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5590 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5591 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5592
5593 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5594 {
5595 /** @todo CPL check. */
5596 }
5597
5598 /*
5599 * There are two kinds of data selectors, normal and expand down.
5600 */
5601 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5602 {
5603 if ( GCPtrFirst32 > pSel->u32Limit
5604 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5605 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5606 }
5607 else
5608 {
5609 /*
5610 * The upper boundary is defined by the B bit, not the G bit!
5611 */
5612 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5613 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5614 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5615 }
5616 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5617 }
5618 else
5619 {
5620 /*
5621 * A code selector can usually be used to read through; writing is
5622 * only permitted in real and V8086 mode.
5623 */
5624 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5625 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5626 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5627 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5628 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5629
5630 if ( GCPtrFirst32 > pSel->u32Limit
5631 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5632 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5633
5634 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5635 {
5636 /** @todo CPL check. */
5637 }
5638
5639 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5640 }
5641 }
5642 else
5643 return iemRaiseGeneralProtectionFault0(pVCpu);
5644 return VINF_SUCCESS;
5645 }
5646
5647 case IEMMODE_64BIT:
5648 {
5649 RTGCPTR GCPtrMem = *pGCPtrMem;
5650 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5651 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5652
5653 Assert(cbMem >= 1);
5654 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5655 return VINF_SUCCESS;
5656 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5657 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5658 return iemRaiseGeneralProtectionFault0(pVCpu);
5659 }
5660
5661 default:
5662 AssertFailedReturn(VERR_IEM_IPE_7);
5663 }
5664}
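
#if 0 /* Illustrative sketch, not part of the build: the 16/32-bit data segment limit checks above. */
/*
 * For a normal data segment the valid offsets are [0, limit]; for an
 * expand-down segment they are (limit, 0xffff] or (limit, 0xffffffff]
 * depending on the D/B bit.  The helper is an assumption-named restatement
 * that ignores the present/usable/type and CPL checks handled by the real
 * code; offLast is the inclusive last byte of the access.
 */
# include <stdint.h>
# include <stdbool.h>

static bool exampleDataSegAccessOk(uint32_t offFirst, uint32_t offLast,
                                   uint32_t u32Limit, bool fExpandDown, bool fBigSeg)
{
    if (!fExpandDown)
        return offFirst <= u32Limit && offLast <= u32Limit;
    uint32_t const offUpper = fBigSeg ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > u32Limit && offLast <= offUpper;
}
#endif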
5665
5666
5667/**
5668 * Translates a virtual address to a physical address and checks if we
5669 * can access the page as specified.
5670 *
5671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5672 * @param GCPtrMem The virtual address.
5673 * @param cbAccess The access size, for raising \#PF correctly for
5674 * FXSAVE and such.
5675 * @param fAccess The intended access.
5676 * @param pGCPhysMem Where to return the physical address.
5677 */
5678VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5679 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5680{
5681 /** @todo Need a different PGM interface here. We're currently using
5682 * generic / REM interfaces. This won't cut it for R0. */
5683 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5684 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5685 * here. */
5686 PGMPTWALK Walk;
5687 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5688 if (RT_FAILURE(rc))
5689 {
5690 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5691 /** @todo Check unassigned memory in unpaged mode. */
5692 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5693#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5694 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5695 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5696#endif
5697 *pGCPhysMem = NIL_RTGCPHYS;
5698 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5699 }
5700
5701 /* If the page is writable and does not have the no-exec bit set, all
5702 access is allowed. Otherwise we'll have to check more carefully... */
5703 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5704 {
5705 /* Write to read only memory? */
5706 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5707 && !(Walk.fEffective & X86_PTE_RW)
5708 && ( ( IEM_GET_CPL(pVCpu) == 3
5709 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5710 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5711 {
5712 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5713 *pGCPhysMem = NIL_RTGCPHYS;
5714#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5715 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5716 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5717#endif
5718 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5719 }
5720
5721 /* Kernel memory accessed by userland? */
5722 if ( !(Walk.fEffective & X86_PTE_US)
5723 && IEM_GET_CPL(pVCpu) == 3
5724 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5725 {
5726 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5727 *pGCPhysMem = NIL_RTGCPHYS;
5728#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5729 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5730 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5731#endif
5732 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5733 }
5734
5735 /* Executing non-executable memory? */
5736 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5737 && (Walk.fEffective & X86_PTE_PAE_NX)
5738 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5739 {
5740 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5741 *pGCPhysMem = NIL_RTGCPHYS;
5742#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5743 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5744 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5745#endif
5746 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5747 VERR_ACCESS_DENIED);
5748 }
5749 }
5750
5751 /*
5752 * Set the dirty / access flags.
5753 * ASSUMES this is set when the address is translated rather than on commit...
5754 */
5755 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5756 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5757 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5758 {
5759 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5760 AssertRC(rc2);
5761 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5762 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5763 }
5764
5765 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5766 *pGCPhysMem = GCPhys;
5767 return VINF_SUCCESS;
5768}
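
#if 0 /* Illustrative sketch, not part of the build: the protection checks above, condensed. */
/*
 * Given the effective page attributes from the walk (R/W, U/S, NX), the three
 * faulting cases are: a write to a read-only page (supervisor writes only
 * fault when CR0.WP is set), a user-mode access to a supervisor page, and an
 * instruction fetch from a no-execute page with EFER.NXE set.  Names and
 * parameters are illustrative assumptions.
 */
# include <stdbool.h>

typedef struct EXAMPLEPTEATTRS { bool fWritable, fUserPage, fNoExec; } EXAMPLEPTEATTRS;

static bool examplePageAccessAllowed(EXAMPLEPTEATTRS Attrs, bool fWrite, bool fExec,
                                     bool fUserAccess /* CPL == 3, non-system access */,
                                     bool fCr0Wp, bool fEferNxe)
{
    if (fWrite && !Attrs.fWritable && (fUserAccess || fCr0Wp))
        return false;   /* write to read-only page */
    if (fUserAccess && !Attrs.fUserPage)
        return false;   /* user access to supervisor page */
    if (fExec && Attrs.fNoExec && fEferNxe)
        return false;   /* instruction fetch from NX page */
    return true;
}
#endif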
5769
5770
5771/**
5772 * Looks up a memory mapping entry.
5773 *
5774 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5776 * @param pvMem The memory address.
5777 * @param fAccess The access flags to match.
5778 */
5779DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5780{
5781 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5782 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5783 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5784 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5785 return 0;
5786 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5787 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5788 return 1;
5789 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5790 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5791 return 2;
5792 return VERR_NOT_FOUND;
5793}
5794
5795
5796/**
5797 * Finds a free memmap entry when using iNextMapping doesn't work.
5798 *
5799 * @returns Memory mapping index, 1024 on failure.
5800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5801 */
5802static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5803{
5804 /*
5805 * The easy case.
5806 */
5807 if (pVCpu->iem.s.cActiveMappings == 0)
5808 {
5809 pVCpu->iem.s.iNextMapping = 1;
5810 return 0;
5811 }
5812
5813 /* There should be enough mappings for all instructions. */
5814 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5815
5816 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5817 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5818 return i;
5819
5820 AssertFailedReturn(1024);
5821}
5822
5823
5824/**
5825 * Commits a bounce buffer that needs writing back and unmaps it.
5826 *
5827 * @returns Strict VBox status code.
5828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5829 * @param iMemMap The index of the buffer to commit.
5830 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5831 * Always false in ring-3, obviously.
5832 */
5833static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5834{
5835 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5836 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5837#ifdef IN_RING3
5838 Assert(!fPostponeFail);
5839 RT_NOREF_PV(fPostponeFail);
5840#endif
5841
5842 /*
5843 * Do the writing.
5844 */
5845 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5846 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5847 {
5848 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5849 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5850 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5851 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5852 {
5853 /*
5854 * Carefully and efficiently dealing with access handler return
5855 * codes makes this a little bloated.
5856 */
5857 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5858 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5859 pbBuf,
5860 cbFirst,
5861 PGMACCESSORIGIN_IEM);
5862 if (rcStrict == VINF_SUCCESS)
5863 {
5864 if (cbSecond)
5865 {
5866 rcStrict = PGMPhysWrite(pVM,
5867 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5868 pbBuf + cbFirst,
5869 cbSecond,
5870 PGMACCESSORIGIN_IEM);
5871 if (rcStrict == VINF_SUCCESS)
5872 { /* nothing */ }
5873 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5874 {
5875 LogEx(LOG_GROUP_IEM,
5876 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5877 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5878 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5879 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5880 }
5881#ifndef IN_RING3
5882 else if (fPostponeFail)
5883 {
5884 LogEx(LOG_GROUP_IEM,
5885 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5886 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5887 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5888 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5889 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5890 return iemSetPassUpStatus(pVCpu, rcStrict);
5891 }
5892#endif
5893 else
5894 {
5895 LogEx(LOG_GROUP_IEM,
5896 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5897 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5898 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5899 return rcStrict;
5900 }
5901 }
5902 }
5903 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5904 {
5905 if (!cbSecond)
5906 {
5907 LogEx(LOG_GROUP_IEM,
5908 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5909 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5910 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5911 }
5912 else
5913 {
5914 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5915 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5916 pbBuf + cbFirst,
5917 cbSecond,
5918 PGMACCESSORIGIN_IEM);
5919 if (rcStrict2 == VINF_SUCCESS)
5920 {
5921 LogEx(LOG_GROUP_IEM,
5922 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5923 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5924 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5925 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5926 }
5927 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5928 {
5929 LogEx(LOG_GROUP_IEM,
5930 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5932 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5933 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5934 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5935 }
5936#ifndef IN_RING3
5937 else if (fPostponeFail)
5938 {
5939 LogEx(LOG_GROUP_IEM,
5940 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5941 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5942 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5943 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5944 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5945 return iemSetPassUpStatus(pVCpu, rcStrict);
5946 }
5947#endif
5948 else
5949 {
5950 LogEx(LOG_GROUP_IEM,
5951 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5952 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5953 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5954 return rcStrict2;
5955 }
5956 }
5957 }
5958#ifndef IN_RING3
5959 else if (fPostponeFail)
5960 {
5961 LogEx(LOG_GROUP_IEM,
5962 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5963 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5964 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5965 if (!cbSecond)
5966 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5967 else
5968 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5969 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5970 return iemSetPassUpStatus(pVCpu, rcStrict);
5971 }
5972#endif
5973 else
5974 {
5975 LogEx(LOG_GROUP_IEM,
5976 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5977 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5978 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5979 return rcStrict;
5980 }
5981 }
5982 else
5983 {
5984 /*
5985 * No access handlers, much simpler.
5986 */
5987 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5988 if (RT_SUCCESS(rc))
5989 {
5990 if (cbSecond)
5991 {
5992 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5993 if (RT_SUCCESS(rc))
5994 { /* likely */ }
5995 else
5996 {
5997 LogEx(LOG_GROUP_IEM,
5998 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5999 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6000 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6001 return rc;
6002 }
6003 }
6004 }
6005 else
6006 {
6007 LogEx(LOG_GROUP_IEM,
6008 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6009 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6010 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6011 return rc;
6012 }
6013 }
6014 }
6015
6016#if defined(IEM_LOG_MEMORY_WRITES)
6017 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6018 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6019 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6020 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6021 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6022 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6023
6024 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6025 g_cbIemWrote = cbWrote;
6026 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6027#endif
6028
6029 /*
6030 * Free the mapping entry.
6031 */
6032 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6033 Assert(pVCpu->iem.s.cActiveMappings != 0);
6034 pVCpu->iem.s.cActiveMappings--;
6035 return VINF_SUCCESS;
6036}
6037
6038
6039/**
6040 * iemMemMap worker that deals with a request crossing pages.
6041 */
6042static VBOXSTRICTRC
6043iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6044 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6045{
6046 Assert(cbMem <= GUEST_PAGE_SIZE);
6047
6048 /*
6049 * Do the address translations.
6050 */
6051 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6052 RTGCPHYS GCPhysFirst;
6053 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6054 if (rcStrict != VINF_SUCCESS)
6055 return rcStrict;
6056 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6057
6058 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
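 /* Worked example (assuming 4 KiB guest pages): a GCPtrFirst ending in 0xffd with cbMem=8
  * gives cbFirstPage = 0x1000 - 0xffd = 3 and cbSecondPage = 8 - 3 = 5, i.e. the last three
  * bytes of the first page followed by the first five bytes of the next one. */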
6059 RTGCPHYS GCPhysSecond;
6060 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6061 cbSecondPage, fAccess, &GCPhysSecond);
6062 if (rcStrict != VINF_SUCCESS)
6063 return rcStrict;
6064 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6065 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6066
6067 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6068
6069 /*
6070 * Read in the current memory content if it's a read, execute or partial
6071 * write access.
6072 */
6073 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6074
6075 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6076 {
6077 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6078 {
6079 /*
6080 * Must carefully deal with access handler status codes here,
6081 * makes the code a bit bloated.
6082 */
6083 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6084 if (rcStrict == VINF_SUCCESS)
6085 {
6086 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6087 if (rcStrict == VINF_SUCCESS)
6088 { /*likely */ }
6089 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6090 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6091 else
6092 {
6093 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6094 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6095 return rcStrict;
6096 }
6097 }
6098 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6099 {
6100 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6101 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6102 {
6103 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6104 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6105 }
6106 else
6107 {
6108 LogEx(LOG_GROUP_IEM,
6109 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6110 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6111 return rcStrict2;
6112 }
6113 }
6114 else
6115 {
6116 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6117 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6118 return rcStrict;
6119 }
6120 }
6121 else
6122 {
6123 /*
6124 * No informational status codes here, much more straightforward.
6125 */
6126 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6127 if (RT_SUCCESS(rc))
6128 {
6129 Assert(rc == VINF_SUCCESS);
6130 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6131 if (RT_SUCCESS(rc))
6132 Assert(rc == VINF_SUCCESS);
6133 else
6134 {
6135 LogEx(LOG_GROUP_IEM,
6136 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6137 return rc;
6138 }
6139 }
6140 else
6141 {
6142 LogEx(LOG_GROUP_IEM,
6143 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6144 return rc;
6145 }
6146 }
6147 }
6148#ifdef VBOX_STRICT
6149 else
6150 memset(pbBuf, 0xcc, cbMem);
6151 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6152 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6153#endif
6154 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6155
6156 /*
6157 * Commit the bounce buffer entry.
6158 */
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6160 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6161 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6162 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6163 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6164 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6165 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6166 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6167 pVCpu->iem.s.cActiveMappings++;
6168
6169 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6170 *ppvMem = pbBuf;
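 /* Note: bUnmapInfo layout (decoded again by the unmap functions): bits 2:0 = mapping table
  * index, bit 3 = always-set validity marker, bits 7:4 = fAccess & IEM_ACCESS_TYPE_MASK. */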
6171 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6172 return VINF_SUCCESS;
6173}
6174
6175
6176/**
6177 * iemMemMap worker that deals with iemMemPageMap failures.
6178 */
6179static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6180 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6181{
6182 /*
6183 * Filter out conditions we can handle and the ones which shouldn't happen.
6184 */
6185 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6186 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6187 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6188 {
6189 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6190 return rcMap;
6191 }
6192 pVCpu->iem.s.cPotentialExits++;
6193
6194 /*
6195 * Read in the current memory content if it's a read, execute or partial
6196 * write access.
6197 */
6198 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6199 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6200 {
6201 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6202 memset(pbBuf, 0xff, cbMem);
6203 else
6204 {
6205 int rc;
6206 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6207 {
6208 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6209 if (rcStrict == VINF_SUCCESS)
6210 { /* nothing */ }
6211 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6212 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6213 else
6214 {
6215 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6216 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6217 return rcStrict;
6218 }
6219 }
6220 else
6221 {
6222 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6223 if (RT_SUCCESS(rc))
6224 { /* likely */ }
6225 else
6226 {
6227 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6228 GCPhysFirst, rc));
6229 return rc;
6230 }
6231 }
6232 }
6233 }
6234#ifdef VBOX_STRICT
6235 else
6236 memset(pbBuf, 0xcc, cbMem);
6237#endif
6238#ifdef VBOX_STRICT
6239 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6240 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6241#endif
6242
6243 /*
6244 * Commit the bounce buffer entry.
6245 */
6246 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6247 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6248 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6249 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6250 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6251 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6252 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6253 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6254 pVCpu->iem.s.cActiveMappings++;
6255
6256 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6257 *ppvMem = pbBuf;
6258 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6259 return VINF_SUCCESS;
6260}
6261
6262
6263
6264/**
6265 * Maps the specified guest memory for the given kind of access.
6266 *
6267 * This may be using bounce buffering of the memory if it's crossing a page
6268 * boundary or if there is an access handler installed for any of it. Because
6269 * of lock prefix guarantees, we're in for some extra clutter when this
6270 * happens.
6271 *
6272 * This may raise a \#GP, \#SS, \#PF or \#AC.
6273 *
6274 * @returns VBox strict status code.
6275 *
6276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6277 * @param ppvMem Where to return the pointer to the mapped memory.
6278 * @param pbUnmapInfo Where to return unmap info to be passed to
6279 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6280 * done.
6281 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6282 * 8, 12, 16, 32 or 512. When used by string operations
6283 * it can be up to a page.
6284 * @param iSegReg The index of the segment register to use for this
6285 * access. The base and limits are checked. Use UINT8_MAX
6286 * to indicate that no segmentation is required (for IDT,
6287 * GDT and LDT accesses).
6288 * @param GCPtrMem The address of the guest memory.
6289 * @param fAccess How the memory is being accessed. The
6290 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6291 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6292 * when raising exceptions.
6293 * @param uAlignCtl Alignment control:
6294 * - Bits 15:0 is the alignment mask.
6295 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6296 * IEM_MEMMAP_F_ALIGN_SSE, and
6297 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6298 * Pass zero to skip alignment.
6299 */
6300VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6301 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6302{
6303 /*
6304 * Check the input and figure out which mapping entry to use.
6305 */
6306 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6307 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6308 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6309 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6310 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6311
6312 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6313 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6314 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6315 {
6316 iMemMap = iemMemMapFindFree(pVCpu);
6317 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6318 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6319 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6320 pVCpu->iem.s.aMemMappings[2].fAccess),
6321 VERR_IEM_IPE_9);
6322 }
6323
6324 /*
6325 * Map the memory, checking that we can actually access it. If something
6326 * slightly complicated happens, fall back on bounce buffering.
6327 */
6328 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6329 if (rcStrict == VINF_SUCCESS)
6330 { /* likely */ }
6331 else
6332 return rcStrict;
6333
6334 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6335 { /* likely */ }
6336 else
6337 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6338
6339 /*
6340 * Alignment check.
6341 */
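 /* Example of how callers drive this: iemMemFetchDataU128AlignedSse (further down) passes
  * uAlignCtl = (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE,
  * so a 16-byte SSE access faults unless the low four address bits are zero. */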
6342 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6343 { /* likelyish */ }
6344 else
6345 {
6346 /* Misaligned access. */
6347 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6348 {
6349 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6350 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6351 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6352 {
6353 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6354
6355 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6356 return iemRaiseAlignmentCheckException(pVCpu);
6357 }
6358 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6359 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6360 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6361 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6362 * that's what FXSAVE does on a 10980xe. */
6363 && iemMemAreAlignmentChecksEnabled(pVCpu))
6364 return iemRaiseAlignmentCheckException(pVCpu);
6365 else
6366 return iemRaiseGeneralProtectionFault0(pVCpu);
6367 }
6368 }
6369
6370#ifdef IEM_WITH_DATA_TLB
6371 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6372
6373 /*
6374 * Get the TLB entry for this page.
6375 */
6376 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6377 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6378 if (pTlbe->uTag == uTag)
6379 {
6380# ifdef VBOX_WITH_STATISTICS
6381 pVCpu->iem.s.DataTlb.cTlbHits++;
6382# endif
6383 }
6384 else
6385 {
6386 pVCpu->iem.s.DataTlb.cTlbMisses++;
6387 PGMPTWALK Walk;
6388 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6389 if (RT_FAILURE(rc))
6390 {
6391 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6392# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6393 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6394 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6395# endif
6396 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6397 }
6398
6399 Assert(Walk.fSucceeded);
6400 pTlbe->uTag = uTag;
6401 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6402 pTlbe->GCPhys = Walk.GCPhys;
6403 pTlbe->pbMappingR3 = NULL;
6404 }
6405
6406 /*
6407 * Check TLB page table level access flags.
6408 */
6409 /* If the page is either supervisor only or non-writable, we need to do
6410 more careful access checks. */
6411 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6412 {
6413 /* Write to read only memory? */
6414 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6415 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6416 && ( ( IEM_GET_CPL(pVCpu) == 3
6417 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6418 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6419 {
6420 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6421# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6422 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6423 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6424# endif
6425 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6426 }
6427
6428 /* Kernel memory accessed by userland? */
6429 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6430 && IEM_GET_CPL(pVCpu) == 3
6431 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6432 {
6433 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6434# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6435 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6436 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6437# endif
6438 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6439 }
6440 }
6441
6442 /*
6443 * Set the dirty / access flags.
6444 * ASSUMES this is set when the address is translated rather than on commit...
6445 */
6446 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6447 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6448 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6449 {
6450 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6451 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6452 AssertRC(rc2);
6453 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6454 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6455 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6456 }
6457
6458 /*
6459 * Look up the physical page info if necessary.
6460 */
6461 uint8_t *pbMem = NULL;
6462 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6463# ifdef IN_RING3
6464 pbMem = pTlbe->pbMappingR3;
6465# else
6466 pbMem = NULL;
6467# endif
6468 else
6469 {
6470 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6471 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6472 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6473 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6474 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6475 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6476 { /* likely */ }
6477 else
6478 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6479 pTlbe->pbMappingR3 = NULL;
6480 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6481 | IEMTLBE_F_NO_MAPPINGR3
6482 | IEMTLBE_F_PG_NO_READ
6483 | IEMTLBE_F_PG_NO_WRITE
6484 | IEMTLBE_F_PG_UNASSIGNED
6485 | IEMTLBE_F_PG_CODE_PAGE);
6486 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6487 &pbMem, &pTlbe->fFlagsAndPhysRev);
6488 AssertRCReturn(rc, rc);
6489# ifdef IN_RING3
6490 pTlbe->pbMappingR3 = pbMem;
6491# endif
6492 }
6493
6494 /*
6495 * Check the physical page level access and mapping.
6496 */
6497 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6498 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6499 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6500 { /* probably likely */ }
6501 else
6502 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6503 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6504 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6505 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6506 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6507 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6508
6509 if (pbMem)
6510 {
6511 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6512 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6513 fAccess |= IEM_ACCESS_NOT_LOCKED;
6514 }
6515 else
6516 {
6517 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6518 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6519 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6520 if (rcStrict != VINF_SUCCESS)
6521 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6522 }
6523
6524 void * const pvMem = pbMem;
6525
6526 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6527 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6528 if (fAccess & IEM_ACCESS_TYPE_READ)
6529 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6530
6531#else /* !IEM_WITH_DATA_TLB */
6532
6533 RTGCPHYS GCPhysFirst;
6534 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6535 if (rcStrict != VINF_SUCCESS)
6536 return rcStrict;
6537
6538 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6539 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6540 if (fAccess & IEM_ACCESS_TYPE_READ)
6541 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6542
6543 void *pvMem;
6544 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6545 if (rcStrict != VINF_SUCCESS)
6546 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6547
6548#endif /* !IEM_WITH_DATA_TLB */
6549
6550 /*
6551 * Fill in the mapping table entry.
6552 */
6553 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6554 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6555 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6556 pVCpu->iem.s.cActiveMappings += 1;
6557
6558 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6559 *ppvMem = pvMem;
6560 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6561 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6562 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6563
6564 return VINF_SUCCESS;
6565}
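
/*
 * Illustrative sketch only (not part of the build): the typical calling pattern
 * is map, access the returned pointer, then commit (or roll back) using the
 * bUnmapInfo cookie, here shown for a hypothetical word-sized data read:
 *
 *     uint8_t         bUnmapInfo;
 *     uint16_t const *pu16Src;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src),
 *                                       iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint16_t const uValue = *pu16Src;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *     }
 *
 * See iemMemFetchDataU32_ZX_U64 further down for a real instance of this pattern.
 */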
6566
6567
6568/**
6569 * Commits the guest memory if bounce buffered and unmaps it.
6570 *
6571 * @returns Strict VBox status code.
6572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6573 * @param bUnmapInfo Unmap info set by iemMemMap.
6574 */
6575VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6576{
6577 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6578 AssertMsgReturn( (bUnmapInfo & 0x08)
6579 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6580 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6581 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6582 VERR_NOT_FOUND);
6583
6584 /* If it's bounce buffered, we may need to write back the buffer. */
6585 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6586 {
6587 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6588 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6589 }
6590 /* Otherwise unlock it. */
6591 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6592 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6593
6594 /* Free the entry. */
6595 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6596 Assert(pVCpu->iem.s.cActiveMappings != 0);
6597 pVCpu->iem.s.cActiveMappings--;
6598 return VINF_SUCCESS;
6599}
6600
6601
6602/**
6603 * Rolls back the guest memory (conceptually only) and unmaps it.
6604 *
6605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6606 * @param bUnmapInfo Unmap info set by iemMemMap.
6607 */
6608void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6609{
6610 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6611 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6612 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6613 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6614 == ((unsigned)bUnmapInfo >> 4),
6615 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6616
6617 /* Unlock it if necessary. */
6618 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6619 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6620
6621 /* Free the entry. */
6622 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6623 Assert(pVCpu->iem.s.cActiveMappings != 0);
6624 pVCpu->iem.s.cActiveMappings--;
6625}
6626
6627#ifdef IEM_WITH_SETJMP
6628
6629/**
6630 * Maps the specified guest memory for the given kind of access, longjmp on
6631 * error.
6632 *
6633 * This may be using bounce buffering of the memory if it's crossing a page
6634 * boundary or if there is an access handler installed for any of it. Because
6635 * of lock prefix guarantees, we're in for some extra clutter when this
6636 * happens.
6637 *
6638 * This may raise a \#GP, \#SS, \#PF or \#AC.
6639 *
6640 * @returns Pointer to the mapped memory.
6641 *
6642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6643 * @param bUnmapInfo Where to return unmap info to be passed to
6644 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6645 * iemMemCommitAndUnmapWoSafeJmp,
6646 * iemMemCommitAndUnmapRoSafeJmp,
6647 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6648 * when done.
6649 * @param cbMem The number of bytes to map. This is usually 1,
6650 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6651 * string operations it can be up to a page.
6652 * @param iSegReg The index of the segment register to use for
6653 * this access. The base and limits are checked.
6654 * Use UINT8_MAX to indicate that no segmentation
6655 * is required (for IDT, GDT and LDT accesses).
6656 * @param GCPtrMem The address of the guest memory.
6657 * @param fAccess How the memory is being accessed. The
6658 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6659 * how to map the memory, while the
6660 * IEM_ACCESS_WHAT_XXX bit is used when raising
6661 * exceptions.
6662 * @param uAlignCtl Alignment control:
6663 * - Bits 15:0 is the alignment mask.
6664 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6665 * IEM_MEMMAP_F_ALIGN_SSE, and
6666 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6667 * Pass zero to skip alignment.
6668 */
6669void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6670 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6671{
6672 /*
6673 * Check the input, check segment access and adjust address
6674 * with segment base.
6675 */
6676 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6677 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6678 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6679
6680 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6681 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6682 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6683
6684 /*
6685 * Alignment check.
6686 */
6687 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6688 { /* likelyish */ }
6689 else
6690 {
6691 /* Misaligned access. */
6692 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6693 {
6694 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6695 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6696 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6697 {
6698 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6699
6700 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6701 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6702 }
6703 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6704 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6705 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6706 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6707 * that's what FXSAVE does on a 10980xe. */
6708 && iemMemAreAlignmentChecksEnabled(pVCpu))
6709 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6710 else
6711 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6712 }
6713 }
6714
6715 /*
6716 * Figure out which mapping entry to use.
6717 */
6718 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6719 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6720 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6721 {
6722 iMemMap = iemMemMapFindFree(pVCpu);
6723 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6724 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6725 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6726 pVCpu->iem.s.aMemMappings[2].fAccess),
6727 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6728 }
6729
6730 /*
6731 * Crossing a page boundary?
6732 */
6733 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6734 { /* No (likely). */ }
6735 else
6736 {
6737 void *pvMem;
6738 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6739 if (rcStrict == VINF_SUCCESS)
6740 return pvMem;
6741 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6742 }
6743
6744#ifdef IEM_WITH_DATA_TLB
6745 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6746
6747 /*
6748 * Get the TLB entry for this page.
6749 */
6750 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6751 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6752 if (pTlbe->uTag == uTag)
6753 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6754 else
6755 {
6756 pVCpu->iem.s.DataTlb.cTlbMisses++;
6757 PGMPTWALK Walk;
6758 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6759 if (RT_FAILURE(rc))
6760 {
6761 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6762# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6763 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6764 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6765# endif
6766 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6767 }
6768
6769 Assert(Walk.fSucceeded);
6770 pTlbe->uTag = uTag;
6771 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6772 pTlbe->GCPhys = Walk.GCPhys;
6773 pTlbe->pbMappingR3 = NULL;
6774 }
6775
6776 /*
6777 * Check the flags and physical revision.
6778 */
6779 /** @todo make the caller pass these in with fAccess. */
6780 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6781 ? IEMTLBE_F_PT_NO_USER : 0;
6782 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6783 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6784 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6785 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6786 ? IEMTLBE_F_PT_NO_WRITE : 0)
6787 : 0;
6788 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6789 uint8_t *pbMem = NULL;
6790 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6791 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6792# ifdef IN_RING3
6793 pbMem = pTlbe->pbMappingR3;
6794# else
6795 pbMem = NULL;
6796# endif
6797 else
6798 {
6799 /*
6800 * Okay, something isn't quite right or needs refreshing.
6801 */
6802 /* Write to read only memory? */
6803 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6804 {
6805 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6806# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6807 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6808 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6809# endif
6810 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6811 }
6812
6813 /* Kernel memory accessed by userland? */
6814 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6815 {
6816 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6817# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6818 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6819 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6820# endif
6821 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6822 }
6823
6824 /* Set the dirty / access flags.
6825 ASSUMES this is set when the address is translated rather than on commit... */
6826 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6827 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6828 {
6829 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6830 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6831 AssertRC(rc2);
6832 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6833 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6834 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6835 }
6836
6837 /*
6838 * Check if the physical page info needs updating.
6839 */
6840 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6841# ifdef IN_RING3
6842 pbMem = pTlbe->pbMappingR3;
6843# else
6844 pbMem = NULL;
6845# endif
6846 else
6847 {
6848 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6849 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6850 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6851 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6852 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6853 pTlbe->pbMappingR3 = NULL;
6854 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6855 | IEMTLBE_F_NO_MAPPINGR3
6856 | IEMTLBE_F_PG_NO_READ
6857 | IEMTLBE_F_PG_NO_WRITE
6858 | IEMTLBE_F_PG_UNASSIGNED
6859 | IEMTLBE_F_PG_CODE_PAGE);
6860 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6861 &pbMem, &pTlbe->fFlagsAndPhysRev);
6862 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6863# ifdef IN_RING3
6864 pTlbe->pbMappingR3 = pbMem;
6865# endif
6866 }
6867
6868 /*
6869 * Check the physical page level access and mapping.
6870 */
6871 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6872 { /* probably likely */ }
6873 else
6874 {
6875 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6876 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6877 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6878 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6879 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6880 if (rcStrict == VINF_SUCCESS)
6881 return pbMem;
6882 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6883 }
6884 }
6885 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6886
6887 if (pbMem)
6888 {
6889 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6890 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6891 fAccess |= IEM_ACCESS_NOT_LOCKED;
6892 }
6893 else
6894 {
6895 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6896 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6897 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6898 if (rcStrict == VINF_SUCCESS)
6899 {
6900 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6901 return pbMem;
6902 }
6903 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6904 }
6905
6906 void * const pvMem = pbMem;
6907
6908 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6909 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6910 if (fAccess & IEM_ACCESS_TYPE_READ)
6911 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6912
6913#else /* !IEM_WITH_DATA_TLB */
6914
6915
6916 RTGCPHYS GCPhysFirst;
6917 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6918 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6919 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6920
6921 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6922 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6923 if (fAccess & IEM_ACCESS_TYPE_READ)
6924 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6925
6926 void *pvMem;
6927 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6928 if (rcStrict == VINF_SUCCESS)
6929 { /* likely */ }
6930 else
6931 {
6932 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6933 if (rcStrict == VINF_SUCCESS)
6934 return pvMem;
6935 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6936 }
6937
6938#endif /* !IEM_WITH_DATA_TLB */
6939
6940 /*
6941 * Fill in the mapping table entry.
6942 */
6943 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6944 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6945 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6946 pVCpu->iem.s.cActiveMappings++;
6947
6948 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6949
6950 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6951 return pvMem;
6952}
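
/*
 * Illustrative sketch only (not part of the build): the setjmp flavour returns
 * the pointer directly and longjmps on failure, so a hypothetical dword read
 * needs no status checks:
 *
 *     uint8_t bUnmapInfo;
 *     uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu32Src),
 *                                                              iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
 *                                                              sizeof(*pu32Src) - 1);
 *     uint32_t const uValue = *pu32Src;
 *     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
 *
 * Compare iemMemFetchDataU128AlignedSseJmp below for a real instance.
 */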
6953
6954
6955/**
6956 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6957 *
6958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6959 * @param bUnmapInfo Unmap info set by iemMemMapJmp (or iemMemMap),
6960 * identifying the mapping to commit and unmap.
6961 */
6962void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6963{
6964 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6965 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6966 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6967 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6968 == ((unsigned)bUnmapInfo >> 4),
6969 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6970
6971 /* If it's bounce buffered, we may need to write back the buffer. */
6972 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6973 {
6974 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6975 {
6976 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6977 if (rcStrict == VINF_SUCCESS)
6978 return;
6979 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6980 }
6981 }
6982 /* Otherwise unlock it. */
6983 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6984 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6985
6986 /* Free the entry. */
6987 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6988 Assert(pVCpu->iem.s.cActiveMappings != 0);
6989 pVCpu->iem.s.cActiveMappings--;
6990}
6991
6992
6993/** Fallback for iemMemCommitAndUnmapRwJmp. */
6994void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6995{
6996 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
6997 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6998}
6999
7000
7001/** Fallback for iemMemCommitAndUnmapWoJmp. */
7002void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7003{
7004 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7005 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7006}
7007
7008
7009/** Fallback for iemMemCommitAndUnmapRoJmp. */
7010void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7011{
7012 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7013 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7014}
7015
7016
7017/** Fallback for iemMemRollbackAndUnmapWo. */
7018void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7019{
7020 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7021 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7022}
7023
7024#endif /* IEM_WITH_SETJMP */
7025
7026#ifndef IN_RING3
7027/**
7028 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
7029 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
7030 *
7031 * Allows the instruction to be completed and retired, while the IEM user will
7032 * return to ring-3 immediately afterwards and do the postponed writes there.
7033 *
7034 * @returns VBox status code (no strict statuses). Caller must check
7035 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7037 * @param bUnmapInfo Unmap info set by iemMemMap, identifying the
7038 * mapping to commit and unmap.
7039 */
7040VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7041{
7042 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7043 AssertMsgReturn( (bUnmapInfo & 0x08)
7044 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7045 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7046 == ((unsigned)bUnmapInfo >> 4),
7047 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7048 VERR_NOT_FOUND);
7049
7050 /* If it's bounce buffered, we may need to write back the buffer. */
7051 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7052 {
7053 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7054 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7055 }
7056 /* Otherwise unlock it. */
7057 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7058 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7059
7060 /* Free the entry. */
7061 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7062 Assert(pVCpu->iem.s.cActiveMappings != 0);
7063 pVCpu->iem.s.cActiveMappings--;
7064 return VINF_SUCCESS;
7065}
7066#endif
7067
7068
7069/**
7070 * Rollbacks mappings, releasing page locks and such.
7071 *
7072 * The caller shall only call this after checking cActiveMappings.
7073 *
7074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7075 */
7076void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7077{
7078 Assert(pVCpu->iem.s.cActiveMappings > 0);
7079
7080 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7081 while (iMemMap-- > 0)
7082 {
7083 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7084 if (fAccess != IEM_ACCESS_INVALID)
7085 {
7086 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7087 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7088 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7089 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7090 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7091 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7092 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7093 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7094 pVCpu->iem.s.cActiveMappings--;
7095 }
7096 }
7097}
7098
7099
7100/*
7101 * Instantiate R/W templates.
7102 */
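/* Each TMPL_MEM_TYPE / TMPL_MEM_FN_SUFF block below expands IEMAllMemRWTmpl.cpp.h into the
 * corresponding fetch/store accessors (presumably iemMemFetchDataU16/U32/U64 and friends, plus
 * stack push/pop helpers while TMPL_MEM_WITH_STACK is defined); these are what e.g.
 * iemMemFetchDataXdtr further down calls. */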
7103#define TMPL_MEM_WITH_STACK
7104
7105#define TMPL_MEM_TYPE uint8_t
7106#define TMPL_MEM_FN_SUFF U8
7107#define TMPL_MEM_FMT_TYPE "%#04x"
7108#define TMPL_MEM_FMT_DESC "byte"
7109#include "IEMAllMemRWTmpl.cpp.h"
7110
7111#define TMPL_MEM_TYPE uint16_t
7112#define TMPL_MEM_FN_SUFF U16
7113#define TMPL_MEM_FMT_TYPE "%#06x"
7114#define TMPL_MEM_FMT_DESC "word"
7115#include "IEMAllMemRWTmpl.cpp.h"
7116
7117#define TMPL_WITH_PUSH_SREG
7118#define TMPL_MEM_TYPE uint32_t
7119#define TMPL_MEM_FN_SUFF U32
7120#define TMPL_MEM_FMT_TYPE "%#010x"
7121#define TMPL_MEM_FMT_DESC "dword"
7122#include "IEMAllMemRWTmpl.cpp.h"
7123#undef TMPL_WITH_PUSH_SREG
7124
7125#define TMPL_MEM_TYPE uint64_t
7126#define TMPL_MEM_FN_SUFF U64
7127#define TMPL_MEM_FMT_TYPE "%#018RX64"
7128#define TMPL_MEM_FMT_DESC "qword"
7129#include "IEMAllMemRWTmpl.cpp.h"
7130
7131#undef TMPL_MEM_WITH_STACK
7132
7133#define TMPL_MEM_TYPE uint64_t
7134#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7135#define TMPL_MEM_FN_SUFF U64AlignedU128
7136#define TMPL_MEM_FMT_TYPE "%#018RX64"
7137#define TMPL_MEM_FMT_DESC "qword"
7138#include "IEMAllMemRWTmpl.cpp.h"
7139
7140/* See IEMAllMemRWTmplInline.cpp.h */
7141#define TMPL_MEM_BY_REF
7142
7143#define TMPL_MEM_TYPE RTFLOAT80U
7144#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7145#define TMPL_MEM_FN_SUFF R80
7146#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7147#define TMPL_MEM_FMT_DESC "tword"
7148#include "IEMAllMemRWTmpl.cpp.h"
7149
7150#define TMPL_MEM_TYPE RTPBCD80U
7151#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7152#define TMPL_MEM_FN_SUFF D80
7153#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7154#define TMPL_MEM_FMT_DESC "tword"
7155#include "IEMAllMemRWTmpl.cpp.h"
7156
7157#define TMPL_MEM_TYPE RTUINT128U
7158#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7159#define TMPL_MEM_FN_SUFF U128
7160#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7161#define TMPL_MEM_FMT_DESC "dqword"
7162#include "IEMAllMemRWTmpl.cpp.h"
7163
7164
7165/**
7166 * Fetches a data dword and zero extends it to a qword.
7167 *
7168 * @returns Strict VBox status code.
7169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7170 * @param pu64Dst Where to return the qword.
7171 * @param iSegReg The index of the segment register to use for
7172 * this access. The base and limits are checked.
7173 * @param GCPtrMem The address of the guest memory.
7174 */
7175VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7176{
7177 /* The lazy approach for now... */
7178 uint8_t bUnmapInfo;
7179 uint32_t const *pu32Src;
7180 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7181 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7182 if (rc == VINF_SUCCESS)
7183 {
7184 *pu64Dst = *pu32Src;
7185 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7186 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7187 }
7188 return rc;
7189}
7190
7191
7192#ifdef SOME_UNUSED_FUNCTION
7193/**
7194 * Fetches a data dword and sign extends it to a qword.
7195 *
7196 * @returns Strict VBox status code.
7197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7198 * @param pu64Dst Where to return the sign extended value.
7199 * @param iSegReg The index of the segment register to use for
7200 * this access. The base and limits are checked.
7201 * @param GCPtrMem The address of the guest memory.
7202 */
7203VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7204{
7205 /* The lazy approach for now... */
7206 uint8_t bUnmapInfo;
7207 int32_t const *pi32Src;
7208 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7209 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7210 if (rc == VINF_SUCCESS)
7211 {
7212 *pu64Dst = *pi32Src;
7213 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7214 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7215 }
7216#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7217 else
7218 *pu64Dst = 0;
7219#endif
7220 return rc;
7221}
7222#endif
7223
7224
7225/**
7226 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7227 * related.
7228 *
7229 * Raises \#GP(0) if not aligned.
7230 *
7231 * @returns Strict VBox status code.
7232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7233 * @param pu128Dst Where to return the double quadword (dqword).
7234 * @param iSegReg The index of the segment register to use for
7235 * this access. The base and limits are checked.
7236 * @param GCPtrMem The address of the guest memory.
7237 */
7238VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7239{
7240 /* The lazy approach for now... */
7241 uint8_t bUnmapInfo;
7242 PCRTUINT128U pu128Src;
7243 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
7244 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7245 if (rc == VINF_SUCCESS)
7246 {
7247 pu128Dst->au64[0] = pu128Src->au64[0];
7248 pu128Dst->au64[1] = pu128Src->au64[1];
7249 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7250 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7251 }
7252 return rc;
7253}
7254
7255
7256#ifdef IEM_WITH_SETJMP
7257/**
7258 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7259 * related, longjmp on error.
7260 *
7261 * Raises \#GP(0) if not aligned.
7262 *
7263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7264 * @param pu128Dst Where to return the double quadword (dqword).
7265 * @param iSegReg The index of the segment register to use for
7266 * this access. The base and limits are checked.
7267 * @param GCPtrMem The address of the guest memory.
7268 */
7269void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7270 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7271{
7272 /* The lazy approach for now... */
7273 uint8_t bUnmapInfo;
7274 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7275 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7276 pu128Dst->au64[0] = pu128Src->au64[0];
7277 pu128Dst->au64[1] = pu128Src->au64[1];
7278 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7279 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7280}
7281#endif
7282
7283
7284/**
7285 * Fetches a data oword (octo word), generally AVX related.
7286 *
7287 * @returns Strict VBox status code.
7288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7289 * @param pu256Dst Where to return the octo word (qqword).
7290 * @param iSegReg The index of the segment register to use for
7291 * this access. The base and limits are checked.
7292 * @param GCPtrMem The address of the guest memory.
7293 */
7294VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7295{
7296 /* The lazy approach for now... */
7297 uint8_t bUnmapInfo;
7298 PCRTUINT256U pu256Src;
7299 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7300 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7301 if (rc == VINF_SUCCESS)
7302 {
7303 pu256Dst->au64[0] = pu256Src->au64[0];
7304 pu256Dst->au64[1] = pu256Src->au64[1];
7305 pu256Dst->au64[2] = pu256Src->au64[2];
7306 pu256Dst->au64[3] = pu256Src->au64[3];
7307 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7308 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7309 }
7310 return rc;
7311}
7312
7313
7314#ifdef IEM_WITH_SETJMP
7315/**
7316 * Fetches a data oword (octo word), generally AVX related.
7317 *
7318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7319 * @param pu256Dst Where to return the octo word (qqword).
7320 * @param iSegReg The index of the segment register to use for
7321 * this access. The base and limits are checked.
7322 * @param GCPtrMem The address of the guest memory.
7323 */
7324void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7325{
7326 /* The lazy approach for now... */
7327 uint8_t bUnmapInfo;
7328 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7329 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7330 pu256Dst->au64[0] = pu256Src->au64[0];
7331 pu256Dst->au64[1] = pu256Src->au64[1];
7332 pu256Dst->au64[2] = pu256Src->au64[2];
7333 pu256Dst->au64[3] = pu256Src->au64[3];
7334 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7335 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7336}
7337#endif
7338
7339
7340/**
7341 * Fetches a data oword (octo word) at an aligned address, generally AVX
7342 * related.
7343 *
7344 * Raises \#GP(0) if not aligned.
7345 *
7346 * @returns Strict VBox status code.
7347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7348 * @param pu256Dst Where to return the octo word (qqword).
7349 * @param iSegReg The index of the segment register to use for
7350 * this access. The base and limits are checked.
7351 * @param GCPtrMem The address of the guest memory.
7352 */
7353VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7354{
7355 /* The lazy approach for now... */
7356 uint8_t bUnmapInfo;
7357 PCRTUINT256U pu256Src;
7358 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7359 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7360 if (rc == VINF_SUCCESS)
7361 {
7362 pu256Dst->au64[0] = pu256Src->au64[0];
7363 pu256Dst->au64[1] = pu256Src->au64[1];
7364 pu256Dst->au64[2] = pu256Src->au64[2];
7365 pu256Dst->au64[3] = pu256Src->au64[3];
7366 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7367 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7368 }
7369 return rc;
7370}
7371
7372
7373#ifdef IEM_WITH_SETJMP
7374/**
7375 * Fetches a data qqword (256 bits) at an aligned address, generally AVX
7376 * related, longjmp on error.
7377 *
7378 * Raises \#GP(0) if not aligned.
7379 *
7380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7381 * @param pu256Dst Where to return the qqword.
7382 * @param iSegReg The index of the segment register to use for
7383 * this access. The base and limits are checked.
7384 * @param GCPtrMem The address of the guest memory.
7385 */
7386void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7387 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7388{
7389 /* The lazy approach for now... */
7390 uint8_t bUnmapInfo;
7391 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7392 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7393 pu256Dst->au64[0] = pu256Src->au64[0];
7394 pu256Dst->au64[1] = pu256Src->au64[1];
7395 pu256Dst->au64[2] = pu256Src->au64[2];
7396 pu256Dst->au64[3] = pu256Src->au64[3];
7397 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7398 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7399}
7400#endif
7401
7402
7403
7404/**
7405 * Fetches a descriptor register (lgdt, lidt).
7406 *
7407 * @returns Strict VBox status code.
7408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7409 * @param pcbLimit Where to return the limit.
7410 * @param pGCPtrBase Where to return the base.
7411 * @param iSegReg The index of the segment register to use for
7412 * this access. The base and limits are checked.
7413 * @param GCPtrMem The address of the guest memory.
7414 * @param enmOpSize The effective operand size.
7415 */
7416VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7417 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7418{
7419 /*
7420 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7421 * little special:
7422 * - The two reads are done separately.
7423 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7424 * - We suspect the 386 to actually commit the limit before the base in
7425 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7426 * don't try to emulate this eccentric behavior, because it's not well
7427 * enough understood and rather hard to trigger.
7428 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7429 */
7430 VBOXSTRICTRC rcStrict;
7431 if (IEM_IS_64BIT_CODE(pVCpu))
7432 {
7433 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7434 if (rcStrict == VINF_SUCCESS)
7435 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7436 }
7437 else
7438 {
7439 uint32_t uTmp = 0; /* (Silences a Visual C++ 'maybe used uninitialized' warning.) */
7440 if (enmOpSize == IEMMODE_32BIT)
7441 {
7442 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7443 {
7444 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7445 if (rcStrict == VINF_SUCCESS)
7446 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7447 }
7448 else
7449 {
7450 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7451 if (rcStrict == VINF_SUCCESS)
7452 {
7453 *pcbLimit = (uint16_t)uTmp;
7454 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7455 }
7456 }
7457 if (rcStrict == VINF_SUCCESS)
7458 *pGCPtrBase = uTmp;
7459 }
7460 else
7461 {
7462 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7463 if (rcStrict == VINF_SUCCESS)
7464 {
7465 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7466 if (rcStrict == VINF_SUCCESS)
7467 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7468 }
7469 }
7470 }
7471 return rcStrict;
7472}
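
#if 0 /* Illustrative sketch, not part of the build: how an LGDT/LIDT style caller
         could use the helper above - the 16-bit limit comes from GCPtrMem and the
         base from GCPtrMem+2.  The caller and the commit step are hypothetical. */
static VBOXSTRICTRC iemExampleFetchXdtr(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEffSrc, IEMMODE enmEffOpSize)
{
    uint16_t cbLimit;
    RTGCPTR  GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... commit cbLimit and GCPtrBase to the guest GDTR or IDTR ... */
    return VINF_SUCCESS;
}
#endif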
7473
7474
7475/**
7476 * Stores a data dqword, SSE aligned.
7477 *
7478 * @returns Strict VBox status code.
7479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7480 * @param iSegReg The index of the segment register to use for
7481 * this access. The base and limits are checked.
7482 * @param GCPtrMem The address of the guest memory.
7483 * @param u128Value The value to store.
7484 */
7485VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7486{
7487 /* The lazy approach for now... */
7488 uint8_t bUnmapInfo;
7489 PRTUINT128U pu128Dst;
7490 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7491 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7492 if (rc == VINF_SUCCESS)
7493 {
7494 pu128Dst->au64[0] = u128Value.au64[0];
7495 pu128Dst->au64[1] = u128Value.au64[1];
7496 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7497 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7498 }
7499 return rc;
7500}
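
#if 0 /* Illustrative sketch, not part of the build: a MOVAPS/MOVDQA style store
         would use the SSE-aligned variant above so that a misaligned address
         raises #GP(0) (subject to the IEM_MEMMAP_F_ALIGN_SSE handling in
         iemMemMap).  The caller and the source value are hypothetical. */
static VBOXSTRICTRC iemExampleStoreXmm(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEffDst, RTUINT128U uXmmValue)
{
    return iemMemStoreDataU128AlignedSse(pVCpu, iEffSeg, GCPtrEffDst, uXmmValue);
}
#endif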
7501
7502
7503#ifdef IEM_WITH_SETJMP
7504/**
7505 * Stores a data dqword, SSE aligned, longjmp on error.
7506 *
7508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7509 * @param iSegReg The index of the segment register to use for
7510 * this access. The base and limits are checked.
7511 * @param GCPtrMem The address of the guest memory.
7512 * @param u128Value The value to store.
7513 */
7514void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7515 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7516{
7517 /* The lazy approach for now... */
7518 uint8_t bUnmapInfo;
7519 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7520 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7521 pu128Dst->au64[0] = u128Value.au64[0];
7522 pu128Dst->au64[1] = u128Value.au64[1];
7523 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7524 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7525}
7526#endif
7527
7528
7529/**
7530 * Stores a data qqword.
7531 *
7532 * @returns Strict VBox status code.
7533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7534 * @param iSegReg The index of the segment register to use for
7535 * this access. The base and limits are checked.
7536 * @param GCPtrMem The address of the guest memory.
7537 * @param pu256Value Pointer to the value to store.
7538 */
7539VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7540{
7541 /* The lazy approach for now... */
7542 uint8_t bUnmapInfo;
7543 PRTUINT256U pu256Dst;
7544 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7545 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7546 if (rc == VINF_SUCCESS)
7547 {
7548 pu256Dst->au64[0] = pu256Value->au64[0];
7549 pu256Dst->au64[1] = pu256Value->au64[1];
7550 pu256Dst->au64[2] = pu256Value->au64[2];
7551 pu256Dst->au64[3] = pu256Value->au64[3];
7552 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7553 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7554 }
7555 return rc;
7556}
7557
7558
7559#ifdef IEM_WITH_SETJMP
7560/**
7561 * Stores a data qqword, longjmp on error.
7562 *
7563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7564 * @param iSegReg The index of the segment register to use for
7565 * this access. The base and limits are checked.
7566 * @param GCPtrMem The address of the guest memory.
7567 * @param pu256Value Pointer to the value to store.
7568 */
7569void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7570{
7571 /* The lazy approach for now... */
7572 uint8_t bUnmapInfo;
7573 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7574 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7575 pu256Dst->au64[0] = pu256Value->au64[0];
7576 pu256Dst->au64[1] = pu256Value->au64[1];
7577 pu256Dst->au64[2] = pu256Value->au64[2];
7578 pu256Dst->au64[3] = pu256Value->au64[3];
7579 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7580 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7581}
7582#endif
7583
7584
7585/**
7586 * Stores a data qqword, AVX \#GP(0) aligned.
7587 *
7588 * @returns Strict VBox status code.
7589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7590 * @param iSegReg The index of the segment register to use for
7591 * this access. The base and limits are checked.
7592 * @param GCPtrMem The address of the guest memory.
7593 * @param pu256Value Pointer to the value to store.
7594 */
7595VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7596{
7597 /* The lazy approach for now... */
7598 uint8_t bUnmapInfo;
7599 PRTUINT256U pu256Dst;
7600 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7601 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7602 if (rc == VINF_SUCCESS)
7603 {
7604 pu256Dst->au64[0] = pu256Value->au64[0];
7605 pu256Dst->au64[1] = pu256Value->au64[1];
7606 pu256Dst->au64[2] = pu256Value->au64[2];
7607 pu256Dst->au64[3] = pu256Value->au64[3];
7608 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7609 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7610 }
7611 return rc;
7612}
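
/* Informational note: unlike the SSE-aligned store of iemMemStoreDataU128AlignedSse
 * above, this AVX-aligned variant passes only IEM_MEMMAP_F_ALIGN_GP and not
 * IEM_MEMMAP_F_ALIGN_SSE to iemMemMap, so only the plain #GP(0) alignment check
 * applies to the 32-byte access. */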
7613
7614
7615#ifdef IEM_WITH_SETJMP
7616/**
7617 * Stores a data qqword, AVX \#GP(0) aligned, longjmp on error.
7618 *
7620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7621 * @param iSegReg The index of the segment register to use for
7622 * this access. The base and limits are checked.
7623 * @param GCPtrMem The address of the guest memory.
7624 * @param pu256Value Pointer to the value to store.
7625 */
7626void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7627 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7628{
7629 /* The lazy approach for now... */
7630 uint8_t bUnmapInfo;
7631 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7632 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7633 pu256Dst->au64[0] = pu256Value->au64[0];
7634 pu256Dst->au64[1] = pu256Value->au64[1];
7635 pu256Dst->au64[2] = pu256Value->au64[2];
7636 pu256Dst->au64[3] = pu256Value->au64[3];
7637 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7638 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7639}
7640#endif
7641
7642
7643/**
7644 * Stores a descriptor register (sgdt, sidt).
7645 *
7646 * @returns Strict VBox status code.
7647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7648 * @param cbLimit The limit.
7649 * @param GCPtrBase The base address.
7650 * @param iSegReg The index of the segment register to use for
7651 * this access. The base and limits are checked.
7652 * @param GCPtrMem The address of the guest memory.
7653 */
7654VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7655{
7656 /*
7657 * The SIDT and SGDT instructions actually store the data using two
7658 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7659 * do not respond to operand size prefixes.
7660 */
7661 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7662 if (rcStrict == VINF_SUCCESS)
7663 {
7664 if (IEM_IS_16BIT_CODE(pVCpu))
7665 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7666 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7667 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7668 else if (IEM_IS_32BIT_CODE(pVCpu))
7669 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7670 else
7671 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7672 }
7673 return rcStrict;
7674}
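
/* Informational note: the 16-bit case above still writes a full dword at +2; for
 * 286-class target CPUs the top byte of the base is forced to 0xFF (mimicking what
 * those CPUs store there), while later CPUs store the 32-bit base unmodified. */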
7675
7676
7677/**
7678 * Begin a special stack push (used by interrupts, exceptions and such).
7679 *
7680 * This will raise \#SS or \#PF if appropriate.
7681 *
7682 * @returns Strict VBox status code.
7683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7684 * @param cbMem The number of bytes to push onto the stack.
7685 * @param cbAlign The alignment mask (7, 3, 1).
7686 * @param ppvMem Where to return the pointer to the stack memory.
7687 * As with the other memory functions this could be
7688 * direct access or bounce buffered access, so
7689 * don't commit the register state until the commit call
7690 * succeeds.
7691 * @param pbUnmapInfo Where to store unmap info for
7692 * iemMemStackPushCommitSpecial.
7693 * @param puNewRsp Where to return the new RSP value. This must be
7694 * passed unchanged to
7695 * iemMemStackPushCommitSpecial().
7696 */
7697VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7698 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7699{
7700 Assert(cbMem < UINT8_MAX);
7701 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7702 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7703}
7704
7705
7706/**
7707 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7708 *
7709 * This will update the rSP.
7710 *
7711 * @returns Strict VBox status code.
7712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7713 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7714 * @param uNewRsp The new RSP value returned by
7715 * iemMemStackPushBeginSpecial().
7716 */
7717VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7718{
7719 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7720 if (rcStrict == VINF_SUCCESS)
7721 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7722 return rcStrict;
7723}
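
#if 0 /* Illustrative sketch, not part of the build: the begin/commit pairing used
         when pushing, e.g., an exception frame.  RSP is only updated once the
         commit call succeeds.  The frame size and contents here are hypothetical. */
static VBOXSTRICTRC iemExamplePushFrame(PVMCPUCC pVCpu)
{
    void    *pvStackFrame;
    uint8_t  bUnmapInfo;
    uint64_t uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6 /*cbMem: three words*/, 1 /*cbAlign*/,
                                                        &pvStackFrame, &bUnmapInfo, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... write the frame contents into pvStackFrame ... */
    return iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); /* updates RSP on success */
}
#endif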
7724
7725
7726/**
7727 * Begin a special stack pop (used by iret, retf and such).
7728 *
7729 * This will raise \#SS or \#PF if appropriate.
7730 *
7731 * @returns Strict VBox status code.
7732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7733 * @param cbMem The number of bytes to pop from the stack.
7734 * @param cbAlign The alignment mask (7, 3, 1).
7735 * @param ppvMem Where to return the pointer to the stack memory.
7736 * @param pbUnmapInfo Where to store unmap info for
7737 * iemMemStackPopDoneSpecial.
7738 * @param puNewRsp Where to return the new RSP value. This must be
7739 * assigned to CPUMCTX::rsp manually some time
7740 * after iemMemStackPopDoneSpecial() has been
7741 * called.
7742 */
7743VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7744 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7745{
7746 Assert(cbMem < UINT8_MAX);
7747 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7748 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7749}
7750
7751
7752/**
7753 * Continue a special stack pop (used by iret and retf), for the purpose of
7754 * retrieving a new stack pointer.
7755 *
7756 * This will raise \#SS or \#PF if appropriate.
7757 *
7758 * @returns Strict VBox status code.
7759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7760 * @param off Offset from the top of the stack. This is zero
7761 * except in the retf case.
7762 * @param cbMem The number of bytes to pop from the stack.
7763 * @param ppvMem Where to return the pointer to the stack memory.
7764 * @param pbUnmapInfo Where to store unmap info for
7765 * iemMemStackPopDoneSpecial.
7766 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7767 * return this because all use of this function is
7768 * to retrieve a new value and anything we return
7769 * here would be discarded.)
7770 */
7771VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7772 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7773{
7774 Assert(cbMem < UINT8_MAX);
7775
7776 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7777 RTGCPTR GCPtrTop;
7778 if (IEM_IS_64BIT_CODE(pVCpu))
7779 GCPtrTop = uCurNewRsp;
7780 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7781 GCPtrTop = (uint32_t)uCurNewRsp;
7782 else
7783 GCPtrTop = (uint16_t)uCurNewRsp;
7784
7785 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7786 0 /* checked in iemMemStackPopBeginSpecial */);
7787}
7788
7789
7790/**
7791 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7792 * iemMemStackPopContinueSpecial).
7793 *
7794 * The caller will manually commit the rSP.
7795 *
7796 * @returns Strict VBox status code.
7797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7798 * @param bUnmapInfo Unmap information returned by
7799 * iemMemStackPopBeginSpecial() or
7800 * iemMemStackPopContinueSpecial().
7801 */
7802VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7803{
7804 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7805}
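
#if 0 /* Illustrative sketch, not part of the build: the pop side of the protocol.
         Unlike the push commit, the caller assigns RSP manually after
         iemMemStackPopDoneSpecial().  Sizes and usage here are hypothetical. */
static VBOXSTRICTRC iemExamplePopFrame(PVMCPUCC pVCpu)
{
    void const *pvFrame;
    uint8_t     bUnmapInfo;
    uint64_t    uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6 /*cbMem*/, 1 /*cbAlign*/,
                                                       &pvFrame, &bUnmapInfo, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... read the popped values out of pvFrame ... */
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp; /* manual commit, as documented above */
    return rcStrict;
}
#endif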
7806
7807
7808/**
7809 * Fetches a system table byte.
7810 *
7811 * @returns Strict VBox status code.
7812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7813 * @param pbDst Where to return the byte.
7814 * @param iSegReg The index of the segment register to use for
7815 * this access. The base and limits are checked.
7816 * @param GCPtrMem The address of the guest memory.
7817 */
7818VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7819{
7820 /* The lazy approach for now... */
7821 uint8_t bUnmapInfo;
7822 uint8_t const *pbSrc;
7823 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7824 if (rc == VINF_SUCCESS)
7825 {
7826 *pbDst = *pbSrc;
7827 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7828 }
7829 return rc;
7830}
7831
7832
7833/**
7834 * Fetches a system table word.
7835 *
7836 * @returns Strict VBox status code.
7837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7838 * @param pu16Dst Where to return the word.
7839 * @param iSegReg The index of the segment register to use for
7840 * this access. The base and limits are checked.
7841 * @param GCPtrMem The address of the guest memory.
7842 */
7843VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7844{
7845 /* The lazy approach for now... */
7846 uint8_t bUnmapInfo;
7847 uint16_t const *pu16Src;
7848 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7849 if (rc == VINF_SUCCESS)
7850 {
7851 *pu16Dst = *pu16Src;
7852 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7853 }
7854 return rc;
7855}
7856
7857
7858/**
7859 * Fetches a system table dword.
7860 *
7861 * @returns Strict VBox status code.
7862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7863 * @param pu32Dst Where to return the dword.
7864 * @param iSegReg The index of the segment register to use for
7865 * this access. The base and limits are checked.
7866 * @param GCPtrMem The address of the guest memory.
7867 */
7868VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7869{
7870 /* The lazy approach for now... */
7871 uint8_t bUnmapInfo;
7872 uint32_t const *pu32Src;
7873 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7874 if (rc == VINF_SUCCESS)
7875 {
7876 *pu32Dst = *pu32Src;
7877 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7878 }
7879 return rc;
7880}
7881
7882
7883/**
7884 * Fetches a system table qword.
7885 *
7886 * @returns Strict VBox status code.
7887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7888 * @param pu64Dst Where to return the qword.
7889 * @param iSegReg The index of the segment register to use for
7890 * this access. The base and limits are checked.
7891 * @param GCPtrMem The address of the guest memory.
7892 */
7893VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7894{
7895 /* The lazy approach for now... */
7896 uint8_t bUnmapInfo;
7897 uint64_t const *pu64Src;
7898 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7899 if (rc == VINF_SUCCESS)
7900 {
7901 *pu64Dst = *pu64Src;
7902 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7903 }
7904 return rc;
7905}
7906
7907
7908/**
7909 * Fetches a descriptor table entry with caller specified error code.
7910 *
7911 * @returns Strict VBox status code.
7912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7913 * @param pDesc Where to return the descriptor table entry.
7914 * @param uSel The selector which table entry to fetch.
7915 * @param uXcpt The exception to raise on table lookup error.
7916 * @param uErrorCode The error code associated with the exception.
7917 */
7918static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7919 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7920{
7921 AssertPtr(pDesc);
7922 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7923
7924 /** @todo did the 286 require all 8 bytes to be accessible? */
7925 /*
7926 * Get the selector table base and check bounds.
7927 */
7928 RTGCPTR GCPtrBase;
7929 if (uSel & X86_SEL_LDT)
7930 {
7931 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
7932 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
7933 {
7934 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
7935 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
7936 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7937 uErrorCode, 0);
7938 }
7939
7940 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
7941 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
7942 }
7943 else
7944 {
7945 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
7946 {
7947 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
7948 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7949 uErrorCode, 0);
7950 }
7951 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
7952 }
7953
7954 /*
7955 * Read the legacy descriptor and maybe the long mode extensions if
7956 * required.
7957 */
7958 VBOXSTRICTRC rcStrict;
7959 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
7960 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
7961 else
7962 {
7963 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
7964 if (rcStrict == VINF_SUCCESS)
7965 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
7966 if (rcStrict == VINF_SUCCESS)
7967 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
7968 if (rcStrict == VINF_SUCCESS)
7969 pDesc->Legacy.au16[3] = 0;
7970 else
7971 return rcStrict;
7972 }
7973
7974 if (rcStrict == VINF_SUCCESS)
7975 {
7976 if ( !IEM_IS_LONG_MODE(pVCpu)
7977 || pDesc->Legacy.Gen.u1DescType)
7978 pDesc->Long.au64[1] = 0;
7979 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
7980 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
7981 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
7982 else
7983 {
7984 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
7985 /** @todo is this the right exception? */
7986 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
7987 }
7988 }
7989 return rcStrict;
7990}
7991
7992
7993/**
7994 * Fetches a descriptor table entry.
7995 *
7996 * @returns Strict VBox status code.
7997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7998 * @param pDesc Where to return the descriptor table entry.
7999 * @param uSel The selector which table entry to fetch.
8000 * @param uXcpt The exception to raise on table lookup error.
8001 */
8002VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8003{
8004 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8005}
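
#if 0 /* Illustrative sketch, not part of the build: fetching a selector's descriptor
         and doing a first validation step.  iemRaiseSelectorNotPresentBySelector is
         assumed to be the usual IEM raise helper; uNewCs is a hypothetical input. */
static VBOXSTRICTRC iemExampleCheckSelector(PVMCPUCC pVCpu, uint16_t uNewCs)
{
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewCs, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    if (!Desc.Legacy.Gen.u1Present)
        return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs); /* assumed helper */
    /* ... type, DPL/RPL and limit checks would follow here ... */
    return VINF_SUCCESS;
}
#endif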
8006
8007
8008/**
8009 * Marks the selector descriptor as accessed (only non-system descriptors).
8010 *
8011 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8012 * will therefore skip the limit checks.
8013 *
8014 * @returns Strict VBox status code.
8015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8016 * @param uSel The selector.
8017 */
8018VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8019{
8020 /*
8021 * Get the selector table base and calculate the entry address.
8022 */
8023 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8024 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8025 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8026 GCPtr += uSel & X86_SEL_MASK;
8027
8028 /*
8029 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8030 * ugly stuff to avoid this. This will make sure it's an atomic access
8031 * as well as more or less remove any question about 8-bit vs 32-bit accesses.
8032 */
8033 VBOXSTRICTRC rcStrict;
8034 uint8_t bUnmapInfo;
8035 uint32_t volatile *pu32;
8036 if ((GCPtr & 3) == 0)
8037 {
8038 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8039 GCPtr += 2 + 2;
8040 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8041 if (rcStrict != VINF_SUCCESS)
8042 return rcStrict;
8043 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8044 }
8045 else
8046 {
8047 /* The misaligned GDT/LDT case, map the whole thing. */
8048 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8049 if (rcStrict != VINF_SUCCESS)
8050 return rcStrict;
8051 switch ((uintptr_t)pu32 & 3)
8052 {
8053 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8054 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8055 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8056 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8057 }
8058 }
8059
8060 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8061}
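
/* Worked example (informational note): the accessed bit is bit 40 of the 8-byte
 * descriptor.  In the aligned case above the mapping starts at offset 2+2 = 4, so
 * the bit ends up at 40 - 32 = 8, which is what ASMAtomicBitSet(pu32, 8) flips;
 * the misaligned cases compensate for the mapping pointer's low bits so that the
 * same descriptor bit is targeted. */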
8062
8063
8064#undef LOG_GROUP
8065#define LOG_GROUP LOG_GROUP_IEM
8066
8067/** @} */
8068
8069/** @name Opcode Helpers.
8070 * @{
8071 */
8072
8073/**
8074 * Calculates the effective address of a ModR/M memory operand.
8075 *
8076 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8077 *
8078 * @return Strict VBox status code.
8079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8080 * @param bRm The ModRM byte.
8081 * @param cbImmAndRspOffset - First byte: The size of any immediate
8082 * following the effective address opcode bytes
8083 * (only for RIP relative addressing).
8084 * - Second byte: RSP displacement (for POP [ESP]).
8085 * @param pGCPtrEff Where to return the effective address.
8086 */
8087VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8088{
8089 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8090# define SET_SS_DEF() \
8091 do \
8092 { \
8093 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8094 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8095 } while (0)
8096
8097 if (!IEM_IS_64BIT_CODE(pVCpu))
8098 {
8099/** @todo Check the effective address size crap! */
8100 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8101 {
8102 uint16_t u16EffAddr;
8103
8104 /* Handle the disp16 form with no registers first. */
8105 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8106 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8107 else
8108 {
8109 /* Get the displacement. */
8110 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8111 {
8112 case 0: u16EffAddr = 0; break;
8113 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8114 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8115 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8116 }
8117
8118 /* Add the base and index registers to the disp. */
8119 switch (bRm & X86_MODRM_RM_MASK)
8120 {
8121 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8122 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8123 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8124 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8125 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8126 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8127 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8128 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8129 }
8130 }
8131
8132 *pGCPtrEff = u16EffAddr;
8133 }
8134 else
8135 {
8136 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8137 uint32_t u32EffAddr;
8138
8139 /* Handle the disp32 form with no registers first. */
8140 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8141 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8142 else
8143 {
8144 /* Get the register (or SIB) value. */
8145 switch ((bRm & X86_MODRM_RM_MASK))
8146 {
8147 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8148 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8149 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8150 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8151 case 4: /* SIB */
8152 {
8153 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8154
8155 /* Get the index and scale it. */
8156 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8157 {
8158 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8159 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8160 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8161 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8162 case 4: u32EffAddr = 0; /*none */ break;
8163 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8164 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8165 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8166 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8167 }
8168 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8169
8170 /* add base */
8171 switch (bSib & X86_SIB_BASE_MASK)
8172 {
8173 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8174 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8175 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8176 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8177 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8178 case 5:
8179 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8180 {
8181 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8182 SET_SS_DEF();
8183 }
8184 else
8185 {
8186 uint32_t u32Disp;
8187 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8188 u32EffAddr += u32Disp;
8189 }
8190 break;
8191 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8192 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8193 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8194 }
8195 break;
8196 }
8197 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8198 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8199 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8200 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8201 }
8202
8203 /* Get and add the displacement. */
8204 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8205 {
8206 case 0:
8207 break;
8208 case 1:
8209 {
8210 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8211 u32EffAddr += i8Disp;
8212 break;
8213 }
8214 case 2:
8215 {
8216 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8217 u32EffAddr += u32Disp;
8218 break;
8219 }
8220 default:
8221 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8222 }
8223
8224 }
8225 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8226 *pGCPtrEff = u32EffAddr;
8227 }
8228 }
8229 else
8230 {
8231 uint64_t u64EffAddr;
8232
8233 /* Handle the rip+disp32 form with no registers first. */
8234 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8235 {
8236 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8237 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8238 }
8239 else
8240 {
8241 /* Get the register (or SIB) value. */
8242 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8243 {
8244 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8245 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8246 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8247 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8248 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8249 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8250 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8251 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8252 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8253 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8254 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8255 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8256 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8257 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8258 /* SIB */
8259 case 4:
8260 case 12:
8261 {
8262 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8263
8264 /* Get the index and scale it. */
8265 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8266 {
8267 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8268 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8269 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8270 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8271 case 4: u64EffAddr = 0; /*none */ break;
8272 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8273 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8274 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8275 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8276 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8277 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8278 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8279 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8280 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8281 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8282 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8283 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8284 }
8285 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8286
8287 /* add base */
8288 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8289 {
8290 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8291 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8292 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8293 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8294 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8295 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8296 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8297 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8298 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8299 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8300 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8301 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8302 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8303 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8304 /* complicated encodings */
8305 case 5:
8306 case 13:
8307 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8308 {
8309 if (!pVCpu->iem.s.uRexB)
8310 {
8311 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8312 SET_SS_DEF();
8313 }
8314 else
8315 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8316 }
8317 else
8318 {
8319 uint32_t u32Disp;
8320 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8321 u64EffAddr += (int32_t)u32Disp;
8322 }
8323 break;
8324 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8325 }
8326 break;
8327 }
8328 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8329 }
8330
8331 /* Get and add the displacement. */
8332 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8333 {
8334 case 0:
8335 break;
8336 case 1:
8337 {
8338 int8_t i8Disp;
8339 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8340 u64EffAddr += i8Disp;
8341 break;
8342 }
8343 case 2:
8344 {
8345 uint32_t u32Disp;
8346 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8347 u64EffAddr += (int32_t)u32Disp;
8348 break;
8349 }
8350 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8351 }
8352
8353 }
8354
8355 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8356 *pGCPtrEff = u64EffAddr;
8357 else
8358 {
8359 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8360 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8361 }
8362 }
8363
8364 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8365 return VINF_SUCCESS;
8366}
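
/* Worked example (informational note, 32-bit addressing): bRm=0x44 decodes to
 * mod=1, rm=4, so a SIB byte and a disp8 follow.  With bSib=0x24 (index=4 => none,
 * base=4 => ESP) the code above computes ESP + (cbImmAndRspOffset >> 8) + disp8
 * and switches the default segment to SS via SET_SS_DEF(). */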
8367
8368
8369#ifdef IEM_WITH_SETJMP
8370/**
8371 * Calculates the effective address of a ModR/M memory operand.
8372 *
8373 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8374 *
8375 * May longjmp on internal error.
8376 *
8377 * @return The effective address.
8378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8379 * @param bRm The ModRM byte.
8380 * @param cbImmAndRspOffset - First byte: The size of any immediate
8381 * following the effective address opcode bytes
8382 * (only for RIP relative addressing).
8383 * - Second byte: RSP displacement (for POP [ESP]).
8384 */
8385RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8386{
8387 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8388# define SET_SS_DEF() \
8389 do \
8390 { \
8391 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8392 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8393 } while (0)
8394
8395 if (!IEM_IS_64BIT_CODE(pVCpu))
8396 {
8397/** @todo Check the effective address size crap! */
8398 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8399 {
8400 uint16_t u16EffAddr;
8401
8402 /* Handle the disp16 form with no registers first. */
8403 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8404 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8405 else
8406 {
8407 /* Get the displacement. */
8408 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8409 {
8410 case 0: u16EffAddr = 0; break;
8411 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8412 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8413 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8414 }
8415
8416 /* Add the base and index registers to the disp. */
8417 switch (bRm & X86_MODRM_RM_MASK)
8418 {
8419 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8420 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8421 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8422 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8423 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8424 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8425 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8426 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8427 }
8428 }
8429
8430 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8431 return u16EffAddr;
8432 }
8433
8434 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8435 uint32_t u32EffAddr;
8436
8437 /* Handle the disp32 form with no registers first. */
8438 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8439 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8440 else
8441 {
8442 /* Get the register (or SIB) value. */
8443 switch ((bRm & X86_MODRM_RM_MASK))
8444 {
8445 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8446 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8447 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8448 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8449 case 4: /* SIB */
8450 {
8451 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8452
8453 /* Get the index and scale it. */
8454 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8455 {
8456 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8457 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8458 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8459 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8460 case 4: u32EffAddr = 0; /*none */ break;
8461 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8462 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8463 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8464 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8465 }
8466 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8467
8468 /* add base */
8469 switch (bSib & X86_SIB_BASE_MASK)
8470 {
8471 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8472 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8473 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8474 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8475 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8476 case 5:
8477 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8478 {
8479 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8480 SET_SS_DEF();
8481 }
8482 else
8483 {
8484 uint32_t u32Disp;
8485 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8486 u32EffAddr += u32Disp;
8487 }
8488 break;
8489 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8490 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8491 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8492 }
8493 break;
8494 }
8495 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8496 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8497 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8498 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8499 }
8500
8501 /* Get and add the displacement. */
8502 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8503 {
8504 case 0:
8505 break;
8506 case 1:
8507 {
8508 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8509 u32EffAddr += i8Disp;
8510 break;
8511 }
8512 case 2:
8513 {
8514 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8515 u32EffAddr += u32Disp;
8516 break;
8517 }
8518 default:
8519 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8520 }
8521 }
8522
8523 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8524 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8525 return u32EffAddr;
8526 }
8527
8528 uint64_t u64EffAddr;
8529
8530 /* Handle the rip+disp32 form with no registers first. */
8531 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8532 {
8533 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8534 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8535 }
8536 else
8537 {
8538 /* Get the register (or SIB) value. */
8539 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8540 {
8541 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8542 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8543 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8544 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8545 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8546 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8547 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8548 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8549 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8550 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8551 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8552 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8553 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8554 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8555 /* SIB */
8556 case 4:
8557 case 12:
8558 {
8559 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8560
8561 /* Get the index and scale it. */
8562 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8563 {
8564 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8565 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8566 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8567 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8568 case 4: u64EffAddr = 0; /*none */ break;
8569 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8570 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8571 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8572 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8573 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8574 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8575 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8576 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8577 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8578 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8579 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8580 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8581 }
8582 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8583
8584 /* add base */
8585 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8586 {
8587 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8588 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8589 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8590 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8591 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8592 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8593 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8594 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8595 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8596 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8597 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8598 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8599 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8600 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8601 /* complicated encodings */
8602 case 5:
8603 case 13:
8604 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8605 {
8606 if (!pVCpu->iem.s.uRexB)
8607 {
8608 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8609 SET_SS_DEF();
8610 }
8611 else
8612 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8613 }
8614 else
8615 {
8616 uint32_t u32Disp;
8617 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8618 u64EffAddr += (int32_t)u32Disp;
8619 }
8620 break;
8621 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8622 }
8623 break;
8624 }
8625 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8626 }
8627
8628 /* Get and add the displacement. */
8629 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8630 {
8631 case 0:
8632 break;
8633 case 1:
8634 {
8635 int8_t i8Disp;
8636 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8637 u64EffAddr += i8Disp;
8638 break;
8639 }
8640 case 2:
8641 {
8642 uint32_t u32Disp;
8643 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8644 u64EffAddr += (int32_t)u32Disp;
8645 break;
8646 }
8647 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8648 }
8649
8650 }
8651
8652 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8653 {
8654 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8655 return u64EffAddr;
8656 }
8657 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8658 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8659 return u64EffAddr & UINT32_MAX;
8660}
8661#endif /* IEM_WITH_SETJMP */
8662
8663
8664/**
8665 * Calculates the effective address of a ModR/M memory operand, extended version
8666 * for use in the recompilers.
8667 *
8668 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8669 *
8670 * @return Strict VBox status code.
8671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8672 * @param bRm The ModRM byte.
8673 * @param cbImmAndRspOffset - First byte: The size of any immediate
8674 * following the effective address opcode bytes
8675 * (only for RIP relative addressing).
8676 * - Second byte: RSP displacement (for POP [ESP]).
8677 * @param pGCPtrEff Where to return the effective address.
8678 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8679 * SIB byte (bits 39:32).
8680 */
8681VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8682{
8683 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8684# define SET_SS_DEF() \
8685 do \
8686 { \
8687 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8688 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8689 } while (0)
8690
8691 uint64_t uInfo;
8692 if (!IEM_IS_64BIT_CODE(pVCpu))
8693 {
8694/** @todo Check the effective address size crap! */
8695 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8696 {
8697 uint16_t u16EffAddr;
8698
8699 /* Handle the disp16 form with no registers first. */
8700 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8701 {
8702 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8703 uInfo = u16EffAddr;
8704 }
8705 else
8706 {
8707 /* Get the displacement. */
8708 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8709 {
8710 case 0: u16EffAddr = 0; break;
8711 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8712 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8713 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8714 }
8715 uInfo = u16EffAddr;
8716
8717 /* Add the base and index registers to the disp. */
8718 switch (bRm & X86_MODRM_RM_MASK)
8719 {
8720 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8721 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8722 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8723 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8724 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8725 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8726 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8727 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8728 }
8729 }
8730
8731 *pGCPtrEff = u16EffAddr;
8732 }
8733 else
8734 {
8735 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8736 uint32_t u32EffAddr;
8737
8738 /* Handle the disp32 form with no registers first. */
8739 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8740 {
8741 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8742 uInfo = u32EffAddr;
8743 }
8744 else
8745 {
8746 /* Get the register (or SIB) value. */
8747 uInfo = 0;
8748 switch ((bRm & X86_MODRM_RM_MASK))
8749 {
8750 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8751 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8752 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8753 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8754 case 4: /* SIB */
8755 {
8756 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8757 uInfo = (uint64_t)bSib << 32;
8758
8759 /* Get the index and scale it. */
8760 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8761 {
8762 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8763 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8764 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8765 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8766 case 4: u32EffAddr = 0; /*none */ break;
8767 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8768 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8769 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8770 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8771 }
8772 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8773
8774 /* add base */
8775 switch (bSib & X86_SIB_BASE_MASK)
8776 {
8777 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8778 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8779 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8780 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8781 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8782 case 5:
8783 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8784 {
8785 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8786 SET_SS_DEF();
8787 }
8788 else
8789 {
8790 uint32_t u32Disp;
8791 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8792 u32EffAddr += u32Disp;
8793 uInfo |= u32Disp;
8794 }
8795 break;
8796 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8797 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8798 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8799 }
8800 break;
8801 }
8802 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8803 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8804 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8805 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8806 }
8807
8808 /* Get and add the displacement. */
8809 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8810 {
8811 case 0:
8812 break;
8813 case 1:
8814 {
8815 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8816 u32EffAddr += i8Disp;
8817 uInfo |= (uint32_t)(int32_t)i8Disp;
8818 break;
8819 }
8820 case 2:
8821 {
8822 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8823 u32EffAddr += u32Disp;
8824 uInfo |= (uint32_t)u32Disp;
8825 break;
8826 }
8827 default:
8828 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8829 }
8830
8831 }
8832 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8833 *pGCPtrEff = u32EffAddr;
8834 }
8835 }
8836 else
8837 {
8838 uint64_t u64EffAddr;
8839
8840 /* Handle the rip+disp32 form with no registers first. */
8841 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8842 {
8843 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8844 uInfo = (uint32_t)u64EffAddr;
8845 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8846 }
8847 else
8848 {
8849 /* Get the register (or SIB) value. */
8850 uInfo = 0;
8851 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8852 {
8853 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8854 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8855 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8856 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8857 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8858 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8859 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8860 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8861 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8862 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8863 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8864 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8865 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8866 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8867 /* SIB */
8868 case 4:
8869 case 12:
8870 {
8871 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8872 uInfo = (uint64_t)bSib << 32;
8873
8874 /* Get the index and scale it. */
8875 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8876 {
8877 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8878 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8879 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8880 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8881 case 4: u64EffAddr = 0; /*none */ break;
8882 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8883 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8884 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8885 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8886 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8887 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8888 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8889 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8890 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8891 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8892 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8893 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8894 }
8895 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8896
8897 /* add base */
8898 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8899 {
8900 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8901 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8902 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8903 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8904 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8905 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8906 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8907 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8908 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8909 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8910 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8911 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8912 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8913 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8914 /* complicated encodings */
8915 case 5:
8916 case 13:
8917 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8918 {
8919 if (!pVCpu->iem.s.uRexB)
8920 {
8921 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8922 SET_SS_DEF();
8923 }
8924 else
8925 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8926 }
8927 else
8928 {
8929 uint32_t u32Disp;
8930 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8931 u64EffAddr += (int32_t)u32Disp;
8932 uInfo |= u32Disp;
8933 }
8934 break;
8935 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8936 }
8937 break;
8938 }
8939 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8940 }
8941
8942 /* Get and add the displacement. */
8943 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8944 {
8945 case 0:
8946 break;
8947 case 1:
8948 {
8949 int8_t i8Disp;
8950 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8951 u64EffAddr += i8Disp;
8952 uInfo |= (uint32_t)(int32_t)i8Disp;
8953 break;
8954 }
8955 case 2:
8956 {
8957 uint32_t u32Disp;
8958 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8959 u64EffAddr += (int32_t)u32Disp;
8960 uInfo |= u32Disp;
8961 break;
8962 }
8963 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8964 }
8965
8966 }
8967
8968 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8969 *pGCPtrEff = u64EffAddr;
8970 else
8971 {
8972 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8973 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8974 }
8975 }
8976 *puInfo = uInfo;
8977
8978 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
8979 return VINF_SUCCESS;
8980}
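/*
 * Worked example (illustrative note, not part of the original sources): in
 * 64-bit mode the bytes 8B 44 8D 08 decode as "mov eax, [rbp+rcx*4+8]".
 * The ModRM byte 44h gives mod=1 (a disp8 follows) and rm=4 (a SIB byte
 * follows); the SIB byte 8Dh gives scale=2 (i.e. *4), index=1 (RCX) and
 * base=5.  Since mod != 0 and there is no REX.B, base 5 selects RBP and the
 * default segment becomes SS, so the code above ends up computing
 * u64EffAddr = (rcx << 2) + rbp + 8.
 */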
8981
8982/** @} */
8983
8984
8985#ifdef LOG_ENABLED
8986/**
8987 * Logs the current instruction.
8988 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8989 * @param fSameCtx Set if we have the same context information as the VMM,
8990 * clear if we may have already executed an instruction in
8991 * our debug context. When clear, we assume IEMCPU holds
8992 * valid CPU mode info.
8993 *
8994 * The @a fSameCtx parameter is now misleading and obsolete.
8995 * @param pszFunction The IEM function doing the execution.
8996 */
8997static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
8998{
8999# ifdef IN_RING3
9000 if (LogIs2Enabled())
9001 {
9002 char szInstr[256];
9003 uint32_t cbInstr = 0;
9004 if (fSameCtx)
9005 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9006 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9007 szInstr, sizeof(szInstr), &cbInstr);
9008 else
9009 {
9010 uint32_t fFlags = 0;
9011 switch (IEM_GET_CPU_MODE(pVCpu))
9012 {
9013 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9014 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9015 case IEMMODE_16BIT:
9016 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9017 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9018 else
9019 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9020 break;
9021 }
9022 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9023 szInstr, sizeof(szInstr), &cbInstr);
9024 }
9025
9026 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9027 Log2(("**** %s fExec=%x\n"
9028 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9029 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9030 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9031 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9032 " %s\n"
9033 , pszFunction, pVCpu->iem.s.fExec,
9034 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9035 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9036 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9037 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9038 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9039 szInstr));
9040
9041 /* This stuff sucks atm. as it fills the log with MSRs. */
9042 //if (LogIs3Enabled())
9043 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9044 }
9045 else
9046# endif
9047 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9048 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9049 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9050}
9051#endif /* LOG_ENABLED */
9052
9053
9054#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9055/**
9056 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9057 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9058 *
9059 * @returns Modified rcStrict.
9060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9061 * @param rcStrict The instruction execution status.
9062 */
9063static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9064{
9065 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9066 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9067 {
9068 /* VMX preemption timer takes priority over NMI-window exits. */
9069 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9070 {
9071 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9072 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9073 }
9074 /*
9075 * Check remaining intercepts.
9076 *
9077 * NMI-window and Interrupt-window VM-exits.
9078 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9079 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9080 *
9081 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9082 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9083 */
9084 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9085 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9086 && !TRPMHasTrap(pVCpu))
9087 {
9088 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9089 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9090 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9091 {
9092 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9093 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9094 }
9095 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9096 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9097 {
9098 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9099 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9100 }
9101 }
9102 }
9103 /* TPR-below threshold/APIC write has the highest priority. */
9104 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9105 {
9106 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9107 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9108 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9109 }
9110 /* MTF takes priority over VMX-preemption timer. */
9111 else
9112 {
9113 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9114 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9115 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9116 }
9117 return rcStrict;
9118}
9119#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9120
9121
9122/**
9123 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9124 * IEMExecOneWithPrefetchedByPC.
9125 *
9126 * Similar code is found in IEMExecLots.
9127 *
9128 * @return Strict VBox status code.
9129 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9130 * @param fExecuteInhibit If set, execute the instruction following CLI,
9131 * POP SS and MOV SS,GR.
9132 * @param pszFunction The calling function name.
9133 */
9134DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9135{
9136 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9137 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9138 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9139 RT_NOREF_PV(pszFunction);
9140
9141#ifdef IEM_WITH_SETJMP
9142 VBOXSTRICTRC rcStrict;
9143 IEM_TRY_SETJMP(pVCpu, rcStrict)
9144 {
9145 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9146 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9147 }
9148 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9149 {
9150 pVCpu->iem.s.cLongJumps++;
9151 }
9152 IEM_CATCH_LONGJMP_END(pVCpu);
9153#else
9154 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9155 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9156#endif
9157 if (rcStrict == VINF_SUCCESS)
9158 pVCpu->iem.s.cInstructions++;
9159 if (pVCpu->iem.s.cActiveMappings > 0)
9160 {
9161 Assert(rcStrict != VINF_SUCCESS);
9162 iemMemRollback(pVCpu);
9163 }
9164 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9165 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9166 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9167
9168//#ifdef DEBUG
9169// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9170//#endif
9171
9172#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9173 /*
9174 * Perform any VMX nested-guest instruction boundary actions.
9175 *
9176 * If any of these causes a VM-exit, we must skip executing the next
9177 * instruction (would run into stale page tables). A VM-exit makes sure
9178     * there is no interrupt-inhibition, so that should ensure we don't go on
9179     * to try to execute the next instruction.  Clearing fExecuteInhibit is
9180 * problematic because of the setjmp/longjmp clobbering above.
9181 */
9182 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9183 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9184 || rcStrict != VINF_SUCCESS)
9185 { /* likely */ }
9186 else
9187 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9188#endif
9189
9190 /* Execute the next instruction as well if a cli, pop ss or
9191 mov ss, Gr has just completed successfully. */
9192 if ( fExecuteInhibit
9193 && rcStrict == VINF_SUCCESS
9194 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9195 {
9196 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9197 if (rcStrict == VINF_SUCCESS)
9198 {
9199#ifdef LOG_ENABLED
9200 iemLogCurInstr(pVCpu, false, pszFunction);
9201#endif
9202#ifdef IEM_WITH_SETJMP
9203 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9204 {
9205 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9206 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9207 }
9208 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9209 {
9210 pVCpu->iem.s.cLongJumps++;
9211 }
9212 IEM_CATCH_LONGJMP_END(pVCpu);
9213#else
9214 IEM_OPCODE_GET_FIRST_U8(&b);
9215 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9216#endif
9217 if (rcStrict == VINF_SUCCESS)
9218 {
9219 pVCpu->iem.s.cInstructions++;
9220#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9221 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9222 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9223 { /* likely */ }
9224 else
9225 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9226#endif
9227 }
9228 if (pVCpu->iem.s.cActiveMappings > 0)
9229 {
9230 Assert(rcStrict != VINF_SUCCESS);
9231 iemMemRollback(pVCpu);
9232 }
9233 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9234 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9235 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9236 }
9237 else if (pVCpu->iem.s.cActiveMappings > 0)
9238 iemMemRollback(pVCpu);
9239 /** @todo drop this after we bake this change into RIP advancing. */
9240 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9241 }
9242
9243 /*
9244 * Return value fiddling, statistics and sanity assertions.
9245 */
9246 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9247
9248 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9249 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9250 return rcStrict;
9251}
9252
9253
9254/**
9255 * Execute one instruction.
9256 *
9257 * @return Strict VBox status code.
9258 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9259 */
9260VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9261{
9262    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9263#ifdef LOG_ENABLED
9264 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9265#endif
9266
9267 /*
9268 * Do the decoding and emulation.
9269 */
9270 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9271 if (rcStrict == VINF_SUCCESS)
9272 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9273 else if (pVCpu->iem.s.cActiveMappings > 0)
9274 iemMemRollback(pVCpu);
9275
9276 if (rcStrict != VINF_SUCCESS)
9277 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9278 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9279 return rcStrict;
9280}
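/*
 * Illustrative usage sketch (not part of the original sources): how a
 * hypothetical caller running on the EMT might drive IEMExecOne.  The helper
 * name below is made up; only the IEMExecOne call itself reflects the API
 * above.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC exampleSingleStep(PVMCPUCC pVCpu)
{
    /* Execute exactly one guest instruction; IEM rolls back any active
       memory mappings itself, so the caller only inspects the status code. */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict == VINF_SUCCESS)
    { /* instruction retired, guest RIP and state have been advanced */ }
    return rcStrict;
}
#endif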
9281
9282
9283VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9284{
9285 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9286 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9287 if (rcStrict == VINF_SUCCESS)
9288 {
9289 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9290 if (pcbWritten)
9291 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9292 }
9293 else if (pVCpu->iem.s.cActiveMappings > 0)
9294 iemMemRollback(pVCpu);
9295
9296 return rcStrict;
9297}
9298
9299
9300VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9301 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9302{
9303 VBOXSTRICTRC rcStrict;
9304 if ( cbOpcodeBytes
9305 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9306 {
9307 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9308#ifdef IEM_WITH_CODE_TLB
9309 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9310 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9311 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9312 pVCpu->iem.s.offCurInstrStart = 0;
9313 pVCpu->iem.s.offInstrNextByte = 0;
9314 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9315#else
9316 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9317 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9318#endif
9319 rcStrict = VINF_SUCCESS;
9320 }
9321 else
9322 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9323 if (rcStrict == VINF_SUCCESS)
9324 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9325 else if (pVCpu->iem.s.cActiveMappings > 0)
9326 iemMemRollback(pVCpu);
9327
9328 return rcStrict;
9329}
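/*
 * Illustrative usage sketch (not part of the original sources): a
 * hypothetical caller that already holds the instruction bytes (e.g. from an
 * earlier fetch) can hand them to IEM so they are used directly when the
 * guest RIP still matches the address they were read from.  The helper name
 * and its parameters are made up for illustration.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC exampleReplayInstr(PVMCPUCC pVCpu, uint8_t const *pabBytes, size_t cbBytes)
{
    /* When pVCpu->cpum.GstCtx.rip equals OpcodeBytesPC, the supplied bytes
       are decoded directly instead of prefetching through the code TLB. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip, pabBytes, cbBytes);
}
#endif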
9330
9331
9332VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9333{
9334 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9335 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9336 if (rcStrict == VINF_SUCCESS)
9337 {
9338 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9339 if (pcbWritten)
9340 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9341 }
9342 else if (pVCpu->iem.s.cActiveMappings > 0)
9343 iemMemRollback(pVCpu);
9344
9345 return rcStrict;
9346}
9347
9348
9349VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9350 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9351{
9352 VBOXSTRICTRC rcStrict;
9353 if ( cbOpcodeBytes
9354 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9355 {
9356 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9357#ifdef IEM_WITH_CODE_TLB
9358 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9359 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9360 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9361 pVCpu->iem.s.offCurInstrStart = 0;
9362 pVCpu->iem.s.offInstrNextByte = 0;
9363 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9364#else
9365 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9366 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9367#endif
9368 rcStrict = VINF_SUCCESS;
9369 }
9370 else
9371 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9372 if (rcStrict == VINF_SUCCESS)
9373 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9374 else if (pVCpu->iem.s.cActiveMappings > 0)
9375 iemMemRollback(pVCpu);
9376
9377 return rcStrict;
9378}
9379
9380
9381/**
9382 * For handling split cacheline lock operations when the host has split-lock
9383 * detection enabled.
9384 *
9385 * This will cause the interpreter to disregard the lock prefix and implicit
9386 * locking (xchg).
9387 *
9388 * @returns Strict VBox status code.
9389 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9390 */
9391VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9392{
9393 /*
9394 * Do the decoding and emulation.
9395 */
9396 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9397 if (rcStrict == VINF_SUCCESS)
9398 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9399 else if (pVCpu->iem.s.cActiveMappings > 0)
9400 iemMemRollback(pVCpu);
9401
9402 if (rcStrict != VINF_SUCCESS)
9403 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9404 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9405 return rcStrict;
9406}
9407
9408
9409/**
9410 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9411 * inject a pending TRPM trap.
9412 */
9413VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9414{
9415 Assert(TRPMHasTrap(pVCpu));
9416
9417 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9418 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9419 {
9420 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9421#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9422 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9423 if (fIntrEnabled)
9424 {
9425 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9426 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9427 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9428 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9429 else
9430 {
9431 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9432 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9433 }
9434 }
9435#else
9436 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9437#endif
9438 if (fIntrEnabled)
9439 {
9440 uint8_t u8TrapNo;
9441 TRPMEVENT enmType;
9442 uint32_t uErrCode;
9443 RTGCPTR uCr2;
9444 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9445 AssertRC(rc2);
9446 Assert(enmType == TRPM_HARDWARE_INT);
9447 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9448
9449 TRPMResetTrap(pVCpu);
9450
9451#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9452 /* Injecting an event may cause a VM-exit. */
9453 if ( rcStrict != VINF_SUCCESS
9454 && rcStrict != VINF_IEM_RAISED_XCPT)
9455 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9456#else
9457 NOREF(rcStrict);
9458#endif
9459 }
9460 }
9461
9462 return VINF_SUCCESS;
9463}
9464
9465
9466VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9467{
9468 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9469 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9470 Assert(cMaxInstructions > 0);
9471
9472 /*
9473 * See if there is an interrupt pending in TRPM, inject it if we can.
9474 */
9475 /** @todo What if we are injecting an exception and not an interrupt? Is that
9476 * possible here? For now we assert it is indeed only an interrupt. */
9477 if (!TRPMHasTrap(pVCpu))
9478 { /* likely */ }
9479 else
9480 {
9481 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9482 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9483 { /*likely */ }
9484 else
9485 return rcStrict;
9486 }
9487
9488 /*
9489 * Initial decoder init w/ prefetch, then setup setjmp.
9490 */
9491 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9492 if (rcStrict == VINF_SUCCESS)
9493 {
9494#ifdef IEM_WITH_SETJMP
9495 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9496 IEM_TRY_SETJMP(pVCpu, rcStrict)
9497#endif
9498 {
9499 /*
9500             * The run loop.  We limit ourselves to the caller-specified maximum number of instructions.
9501 */
9502 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9503 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9504 for (;;)
9505 {
9506 /*
9507 * Log the state.
9508 */
9509#ifdef LOG_ENABLED
9510 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9511#endif
9512
9513 /*
9514 * Do the decoding and emulation.
9515 */
9516 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9517 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9518#ifdef VBOX_STRICT
9519 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9520#endif
9521 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9522 {
9523 Assert(pVCpu->iem.s.cActiveMappings == 0);
9524 pVCpu->iem.s.cInstructions++;
9525
9526#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9527 /* Perform any VMX nested-guest instruction boundary actions. */
9528 uint64_t fCpu = pVCpu->fLocalForcedActions;
9529 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9530 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9531 { /* likely */ }
9532 else
9533 {
9534 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9535 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9536 fCpu = pVCpu->fLocalForcedActions;
9537 else
9538 {
9539 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9540 break;
9541 }
9542 }
9543#endif
9544 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9545 {
9546#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9547 uint64_t fCpu = pVCpu->fLocalForcedActions;
9548#endif
9549 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9550 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9551 | VMCPU_FF_TLB_FLUSH
9552 | VMCPU_FF_UNHALT );
9553
9554 if (RT_LIKELY( ( !fCpu
9555 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9556 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9557 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9558 {
9559 if (--cMaxInstructionsGccStupidity > 0)
9560 {
9561                        /* Poll timers every now and then according to the caller's specs. */
9562 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9563 || !TMTimerPollBool(pVM, pVCpu))
9564 {
9565 Assert(pVCpu->iem.s.cActiveMappings == 0);
9566 iemReInitDecoder(pVCpu);
9567 continue;
9568 }
9569 }
9570 }
9571 }
9572 Assert(pVCpu->iem.s.cActiveMappings == 0);
9573 }
9574 else if (pVCpu->iem.s.cActiveMappings > 0)
9575 iemMemRollback(pVCpu);
9576 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9577 break;
9578 }
9579 }
9580#ifdef IEM_WITH_SETJMP
9581 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9582 {
9583 if (pVCpu->iem.s.cActiveMappings > 0)
9584 iemMemRollback(pVCpu);
9585# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9586 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9587# endif
9588 pVCpu->iem.s.cLongJumps++;
9589 }
9590 IEM_CATCH_LONGJMP_END(pVCpu);
9591#endif
9592
9593 /*
9594 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9595 */
9596 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9597 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9598 }
9599 else
9600 {
9601 if (pVCpu->iem.s.cActiveMappings > 0)
9602 iemMemRollback(pVCpu);
9603
9604#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9605 /*
9606 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9607 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9608 */
9609 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9610#endif
9611 }
9612
9613 /*
9614 * Maybe re-enter raw-mode and log.
9615 */
9616 if (rcStrict != VINF_SUCCESS)
9617 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9618 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9619 if (pcInstructions)
9620 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9621 return rcStrict;
9622}
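/*
 * Illustrative usage sketch (not part of the original sources): a
 * hypothetical caller of IEMExecLots.  Per the assertion at the top of the
 * function, cPollRate must be a power of two minus one since it is used as a
 * mask.  The helper name is made up.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC exampleRunChunk(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    /* Execute up to 4096 instructions, polling timers roughly every 512
       instructions (cPollRate = 511 = 2^9 - 1). */
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    /* cInstructions now holds the number of instructions actually retired. */
    return rcStrict;
}
#endif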
9623
9624
9625/**
9626 * Interface used by EMExecuteExec; gathers exit statistics and enforces instruction limits.
9627 *
9628 * @returns Strict VBox status code.
9629 * @param pVCpu The cross context virtual CPU structure.
9630 * @param fWillExit To be defined.
9631 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9632 * @param cMaxInstructions Maximum number of instructions to execute.
9633 * @param cMaxInstructionsWithoutExits
9634 * The max number of instructions without exits.
9635 * @param pStats Where to return statistics.
9636 */
9637VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9638 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9639{
9640 NOREF(fWillExit); /** @todo define flexible exit crits */
9641
9642 /*
9643 * Initialize return stats.
9644 */
9645 pStats->cInstructions = 0;
9646 pStats->cExits = 0;
9647 pStats->cMaxExitDistance = 0;
9648 pStats->cReserved = 0;
9649
9650 /*
9651 * Initial decoder init w/ prefetch, then setup setjmp.
9652 */
9653 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9654 if (rcStrict == VINF_SUCCESS)
9655 {
9656#ifdef IEM_WITH_SETJMP
9657 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9658 IEM_TRY_SETJMP(pVCpu, rcStrict)
9659#endif
9660 {
9661#ifdef IN_RING0
9662 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9663#endif
9664 uint32_t cInstructionSinceLastExit = 0;
9665
9666 /*
9667             * The run loop.  We limit ourselves to the caller-specified maximum number of instructions.
9668 */
9669 PVM pVM = pVCpu->CTX_SUFF(pVM);
9670 for (;;)
9671 {
9672 /*
9673 * Log the state.
9674 */
9675#ifdef LOG_ENABLED
9676 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9677#endif
9678
9679 /*
9680 * Do the decoding and emulation.
9681 */
9682 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9683
9684 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9685 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9686
9687 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9688 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9689 {
9690 pStats->cExits += 1;
9691 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9692 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9693 cInstructionSinceLastExit = 0;
9694 }
9695
9696 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9697 {
9698 Assert(pVCpu->iem.s.cActiveMappings == 0);
9699 pVCpu->iem.s.cInstructions++;
9700 pStats->cInstructions++;
9701 cInstructionSinceLastExit++;
9702
9703#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9704 /* Perform any VMX nested-guest instruction boundary actions. */
9705 uint64_t fCpu = pVCpu->fLocalForcedActions;
9706 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9707 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9708 { /* likely */ }
9709 else
9710 {
9711 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9712 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9713 fCpu = pVCpu->fLocalForcedActions;
9714 else
9715 {
9716 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9717 break;
9718 }
9719 }
9720#endif
9721 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9722 {
9723#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9724 uint64_t fCpu = pVCpu->fLocalForcedActions;
9725#endif
9726 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9727 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9728 | VMCPU_FF_TLB_FLUSH
9729 | VMCPU_FF_UNHALT );
9730 if (RT_LIKELY( ( ( !fCpu
9731 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9732 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9733 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9734 || pStats->cInstructions < cMinInstructions))
9735 {
9736 if (pStats->cInstructions < cMaxInstructions)
9737 {
9738 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9739 {
9740#ifdef IN_RING0
9741 if ( !fCheckPreemptionPending
9742 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9743#endif
9744 {
9745 Assert(pVCpu->iem.s.cActiveMappings == 0);
9746 iemReInitDecoder(pVCpu);
9747 continue;
9748 }
9749#ifdef IN_RING0
9750 rcStrict = VINF_EM_RAW_INTERRUPT;
9751 break;
9752#endif
9753 }
9754 }
9755 }
9756 Assert(!(fCpu & VMCPU_FF_IEM));
9757 }
9758 Assert(pVCpu->iem.s.cActiveMappings == 0);
9759 }
9760 else if (pVCpu->iem.s.cActiveMappings > 0)
9761 iemMemRollback(pVCpu);
9762 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9763 break;
9764 }
9765 }
9766#ifdef IEM_WITH_SETJMP
9767 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9768 {
9769 if (pVCpu->iem.s.cActiveMappings > 0)
9770 iemMemRollback(pVCpu);
9771 pVCpu->iem.s.cLongJumps++;
9772 }
9773 IEM_CATCH_LONGJMP_END(pVCpu);
9774#endif
9775
9776 /*
9777 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9778 */
9779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9780 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9781 }
9782 else
9783 {
9784 if (pVCpu->iem.s.cActiveMappings > 0)
9785 iemMemRollback(pVCpu);
9786
9787#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9788 /*
9789 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9790 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9791 */
9792 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9793#endif
9794 }
9795
9796 /*
9797 * Maybe re-enter raw-mode and log.
9798 */
9799 if (rcStrict != VINF_SUCCESS)
9800 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9801 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9802 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9803 return rcStrict;
9804}
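/*
 * Illustrative usage sketch (not part of the original sources): a
 * hypothetical caller of IEMExecForExits.  The struct type name
 * IEMEXECFOREXITSTATS is assumed to be the non-pointer form of the
 * PIEMEXECFOREXITSTATS parameter above, and the helper name is made up.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC exampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    /* Run at least 64 and at most 4096 instructions, giving up after 512
       instructions without a potential exit; fWillExit is not yet defined. */
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 64 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    /* Stats.cInstructions, Stats.cExits and Stats.cMaxExitDistance describe the run. */
    return rcStrict;
}
#endif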
9805
9806
9807/**
9808 * Injects a trap, fault, abort, software interrupt or external interrupt.
9809 *
9810 * The parameter list matches TRPMQueryTrapAll pretty closely.
9811 *
9812 * @returns Strict VBox status code.
9813 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9814 * @param u8TrapNo The trap number.
9815 * @param enmType What type is it (trap/fault/abort), software
9816 * interrupt or hardware interrupt.
9817 * @param uErrCode The error code if applicable.
9818 * @param uCr2 The CR2 value if applicable.
9819 * @param cbInstr The instruction length (only relevant for
9820 * software interrupts).
9821 */
9822VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9823 uint8_t cbInstr)
9824{
9825 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9826#ifdef DBGFTRACE_ENABLED
9827 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9828 u8TrapNo, enmType, uErrCode, uCr2);
9829#endif
9830
9831 uint32_t fFlags;
9832 switch (enmType)
9833 {
9834 case TRPM_HARDWARE_INT:
9835 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9836 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9837 uErrCode = uCr2 = 0;
9838 break;
9839
9840 case TRPM_SOFTWARE_INT:
9841 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9842 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9843 uErrCode = uCr2 = 0;
9844 break;
9845
9846 case TRPM_TRAP:
9847 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9848 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9849 if (u8TrapNo == X86_XCPT_PF)
9850 fFlags |= IEM_XCPT_FLAGS_CR2;
9851 switch (u8TrapNo)
9852 {
9853 case X86_XCPT_DF:
9854 case X86_XCPT_TS:
9855 case X86_XCPT_NP:
9856 case X86_XCPT_SS:
9857 case X86_XCPT_PF:
9858 case X86_XCPT_AC:
9859 case X86_XCPT_GP:
9860 fFlags |= IEM_XCPT_FLAGS_ERR;
9861 break;
9862 }
9863 break;
9864
9865 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9866 }
9867
9868 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9869
9870 if (pVCpu->iem.s.cActiveMappings > 0)
9871 iemMemRollback(pVCpu);
9872
9873 return rcStrict;
9874}
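/*
 * Illustrative usage sketch (not part of the original sources): injecting an
 * external interrupt vector via IEMInjectTrap.  For TRPM_HARDWARE_INT the
 * error code and CR2 are ignored, and cbInstr only matters for software
 * interrupts.  The helper name and vector number are made up.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC exampleInjectExtInt(PVMCPUCC pVCpu)
{
    return IEMInjectTrap(pVCpu, 0x20 /*u8TrapNo*/, TRPM_HARDWARE_INT,
                         0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif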
9875
9876
9877/**
9878 * Injects the active TRPM event.
9879 *
9880 * @returns Strict VBox status code.
9881 * @param pVCpu The cross context virtual CPU structure.
9882 */
9883VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9884{
9885#ifndef IEM_IMPLEMENTS_TASKSWITCH
9886 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9887#else
9888 uint8_t u8TrapNo;
9889 TRPMEVENT enmType;
9890 uint32_t uErrCode;
9891 RTGCUINTPTR uCr2;
9892 uint8_t cbInstr;
9893 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9894 if (RT_FAILURE(rc))
9895 return rc;
9896
9897 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9898 * ICEBP \#DB injection as a special case. */
9899 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9900#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9901 if (rcStrict == VINF_SVM_VMEXIT)
9902 rcStrict = VINF_SUCCESS;
9903#endif
9904#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9905 if (rcStrict == VINF_VMX_VMEXIT)
9906 rcStrict = VINF_SUCCESS;
9907#endif
9908 /** @todo Are there any other codes that imply the event was successfully
9909 * delivered to the guest? See @bugref{6607}. */
9910 if ( rcStrict == VINF_SUCCESS
9911 || rcStrict == VINF_IEM_RAISED_XCPT)
9912 TRPMResetTrap(pVCpu);
9913
9914 return rcStrict;
9915#endif
9916}
9917
9918
9919VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9920{
9921 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9922 return VERR_NOT_IMPLEMENTED;
9923}
9924
9925
9926VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9927{
9928 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9929 return VERR_NOT_IMPLEMENTED;
9930}
9931
9932
9933/**
9934 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9935 *
9936 * This API ASSUMES that the caller has already verified that the guest code is
9937 * allowed to access the I/O port. (The I/O port is in the DX register in the
9938 * guest state.)
9939 *
9940 * @returns Strict VBox status code.
9941 * @param pVCpu The cross context virtual CPU structure.
9942 * @param cbValue The size of the I/O port access (1, 2, or 4).
9943 * @param enmAddrMode The addressing mode.
9944 * @param fRepPrefix Indicates whether a repeat prefix is used
9945 * (doesn't matter which for this instruction).
9946 * @param cbInstr The instruction length in bytes.
9947 * @param iEffSeg The effective segment register.
9948 * @param fIoChecked Whether the access to the I/O port has been
9949 * checked or not. It's typically checked in the
9950 * HM scenario.
9951 */
9952VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9953 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
9954{
9955 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
9956 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9957
9958 /*
9959 * State init.
9960 */
9961 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9962
9963 /*
9964 * Switch orgy for getting to the right handler.
9965 */
9966 VBOXSTRICTRC rcStrict;
9967 if (fRepPrefix)
9968 {
9969 switch (enmAddrMode)
9970 {
9971 case IEMMODE_16BIT:
9972 switch (cbValue)
9973 {
9974 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9975 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9976 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9977 default:
9978 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9979 }
9980 break;
9981
9982 case IEMMODE_32BIT:
9983 switch (cbValue)
9984 {
9985 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9986 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9987 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9988 default:
9989 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9990 }
9991 break;
9992
9993 case IEMMODE_64BIT:
9994 switch (cbValue)
9995 {
9996 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9997 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9998 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9999 default:
10000 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10001 }
10002 break;
10003
10004 default:
10005 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10006 }
10007 }
10008 else
10009 {
10010 switch (enmAddrMode)
10011 {
10012 case IEMMODE_16BIT:
10013 switch (cbValue)
10014 {
10015 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10016 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10017 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10018 default:
10019 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10020 }
10021 break;
10022
10023 case IEMMODE_32BIT:
10024 switch (cbValue)
10025 {
10026 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10027 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10028 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10029 default:
10030 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10031 }
10032 break;
10033
10034 case IEMMODE_64BIT:
10035 switch (cbValue)
10036 {
10037 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10038 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10039 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10040 default:
10041 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10042 }
10043 break;
10044
10045 default:
10046 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10047 }
10048 }
10049
10050 if (pVCpu->iem.s.cActiveMappings)
10051 iemMemRollback(pVCpu);
10052
10053 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10054}
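/*
 * Illustrative usage sketch (not part of the original sources): how a
 * hypothetical HM exit handler might emulate a "rep outsb" (F3 6E, 2 bytes)
 * with a 32-bit address size and the default DS segment after it has already
 * verified the I/O port access.  X86_SREG_DS is assumed to be the usual DS
 * segment register index; the helper name is made up.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC exampleEmulateRepOutsb(PVMCPUCC pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS, true /*fIoChecked*/);
}
#endif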
10055
10056
10057/**
10058 * Interface for HM and EM for executing string I/O IN (read) instructions.
10059 *
10060 * This API ASSUMES that the caller has already verified that the guest code is
10061 * allowed to access the I/O port. (The I/O port is in the DX register in the
10062 * guest state.)
10063 *
10064 * @returns Strict VBox status code.
10065 * @param pVCpu The cross context virtual CPU structure.
10066 * @param cbValue The size of the I/O port access (1, 2, or 4).
10067 * @param enmAddrMode The addressing mode.
10068 * @param fRepPrefix Indicates whether a repeat prefix is used
10069 * (doesn't matter which for this instruction).
10070 * @param cbInstr The instruction length in bytes.
10071 * @param fIoChecked Whether the access to the I/O port has been
10072 * checked or not. It's typically checked in the
10073 * HM scenario.
10074 */
10075VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10076 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10077{
10078 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10079
10080 /*
10081 * State init.
10082 */
10083 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10084
10085 /*
10086 * Switch orgy for getting to the right handler.
10087 */
10088 VBOXSTRICTRC rcStrict;
10089 if (fRepPrefix)
10090 {
10091 switch (enmAddrMode)
10092 {
10093 case IEMMODE_16BIT:
10094 switch (cbValue)
10095 {
10096 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10097 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10098 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10099 default:
10100 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10101 }
10102 break;
10103
10104 case IEMMODE_32BIT:
10105 switch (cbValue)
10106 {
10107 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10108 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10109 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10110 default:
10111 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10112 }
10113 break;
10114
10115 case IEMMODE_64BIT:
10116 switch (cbValue)
10117 {
10118 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10119 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10120 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10121 default:
10122 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10123 }
10124 break;
10125
10126 default:
10127 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10128 }
10129 }
10130 else
10131 {
10132 switch (enmAddrMode)
10133 {
10134 case IEMMODE_16BIT:
10135 switch (cbValue)
10136 {
10137 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10138 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10139 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10140 default:
10141 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10142 }
10143 break;
10144
10145 case IEMMODE_32BIT:
10146 switch (cbValue)
10147 {
10148 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10149 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10150 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10151 default:
10152 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10153 }
10154 break;
10155
10156 case IEMMODE_64BIT:
10157 switch (cbValue)
10158 {
10159 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10160 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10161 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10162 default:
10163 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10164 }
10165 break;
10166
10167 default:
10168 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10169 }
10170 }
10171
10172 if ( pVCpu->iem.s.cActiveMappings == 0
10173 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10174 { /* likely */ }
10175 else
10176 {
10177 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10178 iemMemRollback(pVCpu);
10179 }
10180 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10181}
10182
10183
10184/**
10185 * Interface for rawmode to execute an OUT (write) instruction.
10186 *
10187 * @returns Strict VBox status code.
10188 * @param pVCpu The cross context virtual CPU structure.
10189 * @param cbInstr The instruction length in bytes.
10190 * @param u16Port The port to write to.
10191 * @param fImm Whether the port is specified using an immediate operand or
10192 * using the implicit DX register.
10193 * @param cbReg The register size.
10194 *
10195 * @remarks In ring-0 not all of the state needs to be synced in.
10196 */
10197VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10198{
10199 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10200 Assert(cbReg <= 4 && cbReg != 3);
10201
10202 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10203 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10204 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10205 Assert(!pVCpu->iem.s.cActiveMappings);
10206 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10207}
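/*
 * Illustrative usage sketch (not part of the original sources): emulating an
 * "out dx, al" (EE, 1 byte) via IEMExecDecodedOut, i.e. port taken from DX
 * (fImm=false) and a single byte written from AL.  The helper name is made
 * up.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC exampleEmulateOutDxAl(PVMCPUCC pVCpu)
{
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, (uint16_t)pVCpu->cpum.GstCtx.rdx,
                             false /*fImm*/, 1 /*cbReg*/);
}
#endif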
10208
10209
10210/**
10211 * Interface for rawmode to execute an IN (read) instruction.
10212 *
10213 * @returns Strict VBox status code.
10214 * @param pVCpu The cross context virtual CPU structure.
10215 * @param cbInstr The instruction length in bytes.
10216 * @param u16Port The port to read.
10217 * @param fImm Whether the port is specified using an immediate operand or
10218 * using the implicit DX.
10219 * @param cbReg The register size.
10220 */
10221VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10222{
10223 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10224 Assert(cbReg <= 4 && cbReg != 3);
10225
10226 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10227 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10228 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10229 Assert(!pVCpu->iem.s.cActiveMappings);
10230 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10231}
10232
10233
10234/**
10235 * Interface for HM and EM to write to a CRx register.
10236 *
10237 * @returns Strict VBox status code.
10238 * @param pVCpu The cross context virtual CPU structure.
10239 * @param cbInstr The instruction length in bytes.
10240 * @param iCrReg The control register number (destination).
10241 * @param iGReg The general purpose register number (source).
10242 *
10243 * @remarks In ring-0 not all of the state needs to be synced in.
10244 */
10245VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10246{
10247 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10248 Assert(iCrReg < 16);
10249 Assert(iGReg < 16);
10250
10251 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10252 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10253 Assert(!pVCpu->iem.s.cActiveMappings);
10254 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10255}
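/*
 * Illustrative usage sketch (not part of the original sources): emulating a
 * "mov cr3, rax" (0F 22 D8, 3 bytes) via IEMExecDecodedMovCRxWrite, with CR3
 * as the destination and general register 0 (RAX) as the source.  The helper
 * name is made up.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC exampleEmulateMovCr3Rax(PVMCPUCC pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, 0 /*iGReg, RAX*/);
}
#endif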
10256
10257
10258/**
10259 * Interface for HM and EM to read from a CRx register.
10260 *
10261 * @returns Strict VBox status code.
10262 * @param pVCpu The cross context virtual CPU structure.
10263 * @param cbInstr The instruction length in bytes.
10264 * @param iGReg The general purpose register number (destination).
10265 * @param iCrReg The control register number (source).
10266 *
10267 * @remarks In ring-0 not all of the state needs to be synced in.
10268 */
10269VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10270{
10271 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10272 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10273 | CPUMCTX_EXTRN_APIC_TPR);
10274 Assert(iCrReg < 16);
10275 Assert(iGReg < 16);
10276
10277 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10278 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10279 Assert(!pVCpu->iem.s.cActiveMappings);
10280 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10281}
10282
10283
10284/**
10285 * Interface for HM and EM to write to a DRx register.
10286 *
10287 * @returns Strict VBox status code.
10288 * @param pVCpu The cross context virtual CPU structure.
10289 * @param cbInstr The instruction length in bytes.
10290 * @param iDrReg The debug register number (destination).
10291 * @param iGReg The general purpose register number (source).
10292 *
10293 * @remarks In ring-0 not all of the state needs to be synced in.
10294 */
10295VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10296{
10297 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10298 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10299 Assert(iDrReg < 8);
10300 Assert(iGReg < 16);
10301
10302 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10303 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10304 Assert(!pVCpu->iem.s.cActiveMappings);
10305 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10306}
10307
10308
10309/**
10310 * Interface for HM and EM to read from a DRx register.
10311 *
10312 * @returns Strict VBox status code.
10313 * @param pVCpu The cross context virtual CPU structure.
10314 * @param cbInstr The instruction length in bytes.
10315 * @param iGReg The general purpose register number (destination).
10316 * @param iDrReg The debug register number (source).
10317 *
10318 * @remarks In ring-0 not all of the state needs to be synced in.
10319 */
10320VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10321{
10322 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10323 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10324 Assert(iDrReg < 8);
10325 Assert(iGReg < 16);
10326
10327 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10328 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10329 Assert(!pVCpu->iem.s.cActiveMappings);
10330 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10331}
10332
10333
10334/**
10335 * Interface for HM and EM to clear the CR0[TS] bit.
10336 *
10337 * @returns Strict VBox status code.
10338 * @param pVCpu The cross context virtual CPU structure.
10339 * @param cbInstr The instruction length in bytes.
10340 *
10341 * @remarks In ring-0 not all of the state needs to be synced in.
10342 */
10343VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10344{
10345 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10346
10347 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10348 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10349 Assert(!pVCpu->iem.s.cActiveMappings);
10350 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10351}
10352
10353
10354/**
10355 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10356 *
10357 * @returns Strict VBox status code.
10358 * @param pVCpu The cross context virtual CPU structure.
10359 * @param cbInstr The instruction length in bytes.
10360 * @param uValue The value to load into CR0.
10361 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10362 * memory operand. Otherwise pass NIL_RTGCPTR.
10363 *
10364 * @remarks In ring-0 not all of the state needs to be synced in.
10365 */
10366VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10367{
10368 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10369
10370 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10371 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10372 Assert(!pVCpu->iem.s.cActiveMappings);
10373 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10374}
10375
10376
10377/**
10378 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10379 *
10380 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10381 *
10382 * @returns Strict VBox status code.
10383 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10384 * @param cbInstr The instruction length in bytes.
10385 * @remarks In ring-0 not all of the state needs to be synced in.
10386 * @thread EMT(pVCpu)
10387 */
10388VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10389{
10390 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10391
10392 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10393 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10394 Assert(!pVCpu->iem.s.cActiveMappings);
10395 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10396}
10397
10398
10399/**
10400 * Interface for HM and EM to emulate the WBINVD instruction.
10401 *
10402 * @returns Strict VBox status code.
10403 * @param pVCpu The cross context virtual CPU structure.
10404 * @param cbInstr The instruction length in bytes.
10405 *
10406 * @remarks In ring-0 not all of the state needs to be synced in.
10407 */
10408VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10409{
10410 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10411
10412 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10413 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10414 Assert(!pVCpu->iem.s.cActiveMappings);
10415 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10416}
10417
10418
10419/**
10420 * Interface for HM and EM to emulate the INVD instruction.
10421 *
10422 * @returns Strict VBox status code.
10423 * @param pVCpu The cross context virtual CPU structure.
10424 * @param cbInstr The instruction length in bytes.
10425 *
10426 * @remarks In ring-0 not all of the state needs to be synced in.
10427 */
10428VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10429{
10430 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10431
10432 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10433 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10434 Assert(!pVCpu->iem.s.cActiveMappings);
10435 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10436}
10437
10438
10439/**
10440 * Interface for HM and EM to emulate the INVLPG instruction.
10441 *
10442 * @returns Strict VBox status code.
10443 * @retval VINF_PGM_SYNC_CR3
10444 *
10445 * @param pVCpu The cross context virtual CPU structure.
10446 * @param cbInstr The instruction length in bytes.
10447 * @param GCPtrPage The effective address of the page to invalidate.
10448 *
10449 * @remarks In ring-0 not all of the state needs to be synced in.
10450 */
10451VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10452{
10453 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10454
10455 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10456 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10457 Assert(!pVCpu->iem.s.cActiveMappings);
10458 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10459}
10460
10461
10462/**
10463 * Interface for HM and EM to emulate the INVPCID instruction.
10464 *
10465 * @returns Strict VBox status code.
10466 * @retval VINF_PGM_SYNC_CR3
10467 *
10468 * @param pVCpu The cross context virtual CPU structure.
10469 * @param cbInstr The instruction length in bytes.
10470 * @param iEffSeg The effective segment register.
10471 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10472 * @param uType The invalidation type.
10473 *
10474 * @remarks In ring-0 not all of the state needs to be synced in.
10475 */
10476VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10477 uint64_t uType)
10478{
10479 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10480
10481 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10482 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10483 Assert(!pVCpu->iem.s.cActiveMappings);
10484 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10485}
10486
10487
10488/**
10489 * Interface for HM and EM to emulate the CPUID instruction.
10490 *
10491 * @returns Strict VBox status code.
10492 *
10493 * @param pVCpu The cross context virtual CPU structure.
10494 * @param cbInstr The instruction length in bytes.
10495 *
10496 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
10497 */
10498VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10499{
10500 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10501 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10502
10503 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10504 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10505 Assert(!pVCpu->iem.s.cActiveMappings);
10506 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10507}
10508
10509
10510/**
10511 * Interface for HM and EM to emulate the RDPMC instruction.
10512 *
10513 * @returns Strict VBox status code.
10514 *
10515 * @param pVCpu The cross context virtual CPU structure.
10516 * @param cbInstr The instruction length in bytes.
10517 *
10518 * @remarks Not all of the state needs to be synced in.
10519 */
10520VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10521{
10522 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10523 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10524
10525 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10526 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10527 Assert(!pVCpu->iem.s.cActiveMappings);
10528 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10529}
10530
10531
10532/**
10533 * Interface for HM and EM to emulate the RDTSC instruction.
10534 *
10535 * @returns Strict VBox status code.
10536 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10537 *
10538 * @param pVCpu The cross context virtual CPU structure.
10539 * @param cbInstr The instruction length in bytes.
10540 *
10541 * @remarks Not all of the state needs to be synced in.
10542 */
10543VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10544{
10545 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10546 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10547
10548 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10549 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10550 Assert(!pVCpu->iem.s.cActiveMappings);
10551 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10552}
10553
10554
10555/**
10556 * Interface for HM and EM to emulate the RDTSCP instruction.
10557 *
10558 * @returns Strict VBox status code.
10559 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10560 *
10561 * @param pVCpu The cross context virtual CPU structure.
10562 * @param cbInstr The instruction length in bytes.
10563 *
10564 * @remarks Not all of the state needs to be synced in.  It is recommended
10565 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10566 */
10567VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10568{
10569 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10570 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10571
10572 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10573 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10574 Assert(!pVCpu->iem.s.cActiveMappings);
10575 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10576}
10577
10578
10579/**
10580 * Interface for HM and EM to emulate the RDMSR instruction.
10581 *
10582 * @returns Strict VBox status code.
10583 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10584 *
10585 * @param pVCpu The cross context virtual CPU structure.
10586 * @param cbInstr The instruction length in bytes.
10587 *
10588 * @remarks Not all of the state needs to be synced in. Requires RCX and
10589 * (currently) all MSRs.
10590 */
10591VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10592{
10593 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10594 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10595
10596 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10597 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10598 Assert(!pVCpu->iem.s.cActiveMappings);
10599 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10600}
10601
10602
10603/**
10604 * Interface for HM and EM to emulate the WRMSR instruction.
10605 *
10606 * @returns Strict VBox status code.
10607 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10608 *
10609 * @param pVCpu The cross context virtual CPU structure.
10610 * @param cbInstr The instruction length in bytes.
10611 *
10612 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10613 * and (currently) all MSRs.
10614 */
10615VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10616{
10617 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10618 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10619 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10620
10621 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10622 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10623 Assert(!pVCpu->iem.s.cActiveMappings);
10624 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10625}
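/*
 * Illustrative sketch, not part of the build: before calling the helper the
 * caller must have the state asserted above (RCX, RAX, RDX and all MSRs)
 * present in the guest CPU context.  exampleImportGuestState() is a
 * hypothetical placeholder for whatever on-demand state import routine the
 * calling execution engine provides.
 *
 *     int rc = exampleImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
 *                                           | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX
 *                                           | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
 *     AssertRCReturn(rc, rc);
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, cbInstr);
 */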
10626
10627
10628/**
10629 * Interface for HM and EM to emulate the MONITOR instruction.
10630 *
10631 * @returns Strict VBox status code.
10632 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10633 *
10634 * @param pVCpu The cross context virtual CPU structure.
10635 * @param cbInstr The instruction length in bytes.
10636 *
10637 * @remarks Not all of the state needs to be synced in.
10638 * @remarks ASSUMES the default DS segment is used and that no segment
10639 *          override prefixes are present.
10640 */
10641VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10642{
10643 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10644 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10645
10646 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10647 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10648 Assert(!pVCpu->iem.s.cActiveMappings);
10649 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10650}
10651
10652
10653/**
10654 * Interface for HM and EM to emulate the MWAIT instruction.
10655 *
10656 * @returns Strict VBox status code.
10657 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10658 *
10659 * @param pVCpu The cross context virtual CPU structure.
10660 * @param cbInstr The instruction length in bytes.
10661 *
10662 * @remarks Not all of the state needs to be synced in.
10663 */
10664VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10665{
10666 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10667 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10668
10669 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10670 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10671 Assert(!pVCpu->iem.s.cActiveMappings);
10672 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10673}
10674
10675
10676/**
10677 * Interface for HM and EM to emulate the HLT instruction.
10678 *
10679 * @returns Strict VBox status code.
10680 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10681 *
10682 * @param pVCpu The cross context virtual CPU structure.
10683 * @param cbInstr The instruction length in bytes.
10684 *
10685 * @remarks Not all of the state needs to be synced in.
10686 */
10687VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10688{
10689 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10690
10691 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10692 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10693 Assert(!pVCpu->iem.s.cActiveMappings);
10694 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10695}
10696
10697
10698/**
10699 * Checks if IEM is in the process of delivering an event (interrupt or
10700 * exception).
10701 *
10702 * @returns true if we're in the process of raising an interrupt or exception,
10703 * false otherwise.
10704 * @param pVCpu The cross context virtual CPU structure.
10705 * @param puVector Where to store the vector associated with the
10706 * currently delivered event, optional.
10707 * @param pfFlags Where to store the event delivery flags (see
10708 * IEM_XCPT_FLAGS_XXX), optional.
10709 * @param puErr Where to store the error code associated with the
10710 * event, optional.
10711 * @param puCr2 Where to store the CR2 associated with the event,
10712 * optional.
10713 * @remarks The caller should check the flags to determine if the error code and
10714 * CR2 are valid for the event.
10715 */
10716VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10717{
10718 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10719 if (fRaisingXcpt)
10720 {
10721 if (puVector)
10722 *puVector = pVCpu->iem.s.uCurXcpt;
10723 if (pfFlags)
10724 *pfFlags = pVCpu->iem.s.fCurXcpt;
10725 if (puErr)
10726 *puErr = pVCpu->iem.s.uCurXcptErr;
10727 if (puCr2)
10728 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10729 }
10730 return fRaisingXcpt;
10731}
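/*
 * Illustrative sketch, not part of the build: a caller interested in the
 * event currently being delivered should check the returned flags before
 * using the error code and CR2 values, as the remarks above note.  The
 * IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 flag names are assumed here from
 * the IEM_XCPT_FLAGS_XXX set.
 *
 *     uint8_t  uVector; uint32_t fFlags; uint32_t uErr; uint64_t uCr2;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *     {
 *         // Only events flagged accordingly carry a valid error code / CR2.
 *         if (fFlags & IEM_XCPT_FLAGS_ERR)
 *             Log(("Delivering vector %#x with error code %#x\n", uVector, uErr));
 *         if (fFlags & IEM_XCPT_FLAGS_CR2)
 *             Log(("Associated CR2: %RX64\n", uCr2));
 *     }
 */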
10732
10733#ifdef IN_RING3
10734
10735/**
10736 * Handles the unlikely and probably fatal merge cases.
10737 *
10738 * @returns Merged status code.
10739 * @param rcStrict Current EM status code.
10740 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10741 * with @a rcStrict.
10742 * @param iMemMap The memory mapping index. For error reporting only.
10743 * @param pVCpu The cross context virtual CPU structure of the calling
10744 * thread, for error reporting only.
10745 */
10746DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10747 unsigned iMemMap, PVMCPUCC pVCpu)
10748{
10749 if (RT_FAILURE_NP(rcStrict))
10750 return rcStrict;
10751
10752 if (RT_FAILURE_NP(rcStrictCommit))
10753 return rcStrictCommit;
10754
10755 if (rcStrict == rcStrictCommit)
10756 return rcStrictCommit;
10757
10758 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10759 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10760 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10761 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10762 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10763 return VERR_IOM_FF_STATUS_IPE;
10764}
10765
10766
10767/**
10768 * Helper for IOMR3ProcessForceFlag.
10769 *
10770 * @returns Merged status code.
10771 * @param rcStrict Current EM status code.
10772 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10773 * with @a rcStrict.
10774 * @param iMemMap The memory mapping index. For error reporting only.
10775 * @param pVCpu The cross context virtual CPU structure of the calling
10776 * thread, for error reporting only.
10777 */
10778DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10779{
10780 /* Simple. */
10781 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10782 return rcStrictCommit;
10783
10784 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10785 return rcStrict;
10786
10787 /* EM scheduling status codes. */
10788 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10789 && rcStrict <= VINF_EM_LAST))
10790 {
10791 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10792 && rcStrictCommit <= VINF_EM_LAST))
10793 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10794 }
10795
10796 /* Unlikely */
10797 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10798}
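/*
 * Worked example, informational only: if the incoming EM status is
 * VINF_SUCCESS or VINF_EM_RAW_TO_R3 the commit status simply takes over, and
 * if the commit status is VINF_SUCCESS the incoming status is kept.  When
 * both are EM scheduling codes the numerically lower of the two is returned.
 * Failures and any other odd combination are handed to iemR3MergeStatusSlow
 * above.
 */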
10799
10800
10801/**
10802 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10803 *
10804 * @returns Merge between @a rcStrict and what the commit operation returned.
10805 * @param pVM The cross context VM structure.
10806 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10807 * @param rcStrict The status code returned by ring-0 or raw-mode.
10808 */
10809VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10810{
10811 /*
10812 * Reset the pending commit.
10813 */
10814 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10815 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10816 ("%#x %#x %#x\n",
10817 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10818 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10819
10820 /*
10821 * Commit the pending bounce buffers (usually just one).
10822 */
10823 unsigned cBufs = 0;
10824 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10825 while (iMemMap-- > 0)
10826 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10827 {
10828 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10829 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10830 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10831
10832 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10833 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10834 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10835
10836 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10837 {
10838 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10839 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10840 pbBuf,
10841 cbFirst,
10842 PGMACCESSORIGIN_IEM);
10843 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10844 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10845 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10846 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10847 }
10848
10849 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10850 {
10851 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10852 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10853 pbBuf + cbFirst,
10854 cbSecond,
10855 PGMACCESSORIGIN_IEM);
10856 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10857 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10858 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10859 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10860 }
10861 cBufs++;
10862 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10863 }
10864
10865 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10866 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10867 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10868 pVCpu->iem.s.cActiveMappings = 0;
10869 return rcStrict;
10870}
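/*
 * Illustrative sketch, not part of the build: the ring-3 executor is expected
 * to invoke this once it sees VMCPU_FF_IEM pending after returning from
 * ring-0 or raw-mode execution, merging the commit result into the status it
 * already holds.  The exact call site in EM is not shown here.
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */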
10871
10872#endif /* IN_RING3 */
10873