VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@108186

Last change on this file since 108186 was 108186, checked in by vboxsync, 4 days ago

VMM/IEM: Removed memory write stats since nobody is using them anymore (consumer was PATM); mark APIs as internal where possible. jiraref:VBP-1431

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 473.2 KB
1/* $Id: IEMAll.cpp 108186 2025-02-12 15:35:15Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
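/* Illustrative only (not part of the file): with the assignments above, a
 * decode-level statement would look something like
 *     Log4(("decode: %04x:%08RX64 mov eax, [ebx]\n",
 *           pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
 * while TLB housekeeping goes to Log10 and unmasked FPU exceptions to Log11,
 * matching the "IEM" group table above. */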
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/pdmapic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * This will also invalidate TLB entries for any pages with active data
202 * breakpoints on them.
203 *
204 * @returns IEM_F_BRK_PENDING_XXX or zero.
205 * @param pVCpu The cross context virtual CPU structure of the
206 * calling thread.
207 *
208 * @note Don't call directly, use iemCalcExecDbgFlags instead.
209 */
210uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
211{
212 uint32_t fExec = 0;
213
214 /*
215 * Helper for invalidating the data TLB for breakpoint addresses.
216 *
217 * This is to make sure any access to the page will always trigger a TLB
218 * load for as long as the breakpoint is enabled.
219 */
220#ifdef IEM_WITH_DATA_TLB
221# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
222 RTGCPTR uTagNoRev = (a_uValue); \
223 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
224 /** @todo do large page accounting */ \
225 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
226 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
227 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
228 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
229 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
230 } while (0)
231#else
232# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
233#endif
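/* Each TLB tag maps to an even/odd entry pair: the even slot is matched against
   uTlbRevision (non-global entries) and the odd slot against uTlbRevisionGlobal
   (global entries), which is why the macro above zaps both slots so that the
   next access to the breakpoint page is forced to reload the TLB entry. */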
234
235 /*
236 * Process guest breakpoints.
237 */
238#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
239 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
240 { \
241 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
242 { \
243 case X86_DR7_RW_EO: \
244 fExec |= IEM_F_PENDING_BRK_INSTR; \
245 break; \
246 case X86_DR7_RW_WO: \
247 case X86_DR7_RW_RW: \
248 fExec |= IEM_F_PENDING_BRK_DATA; \
249 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
250 break; \
251 case X86_DR7_RW_IO: \
252 fExec |= IEM_F_PENDING_BRK_X86_IO; \
253 break; \
254 } \
255 } \
256 } while (0)
257
258 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
259 if (fGstDr7 & X86_DR7_ENABLED_MASK)
260 {
261/** @todo extract more details here to simplify matching later. */
262#ifdef IEM_WITH_DATA_TLB
263 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
264#endif
265 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
266 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
267 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
268 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
269 }
270
271 /*
272 * Process hypervisor breakpoints.
273 */
274 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
275 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
276 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
277 {
278/** @todo extract more details here to simplify matching later. */
279 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
280 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
281 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
282 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
283 }
284
285 return fExec;
286}
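/* Rough usage sketch (assumption; the real iemCalcExecDbgFlags wrapper is
 * defined elsewhere): the wrapper is expected to do a cheap check for armed
 * breakpoints and only then take this slow path, along the lines of
 *     uint32_t fExec = iemCalcExecFlags(pVCpu);
 *     if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
 *         fExec |= iemCalcExecDbgFlagsSlow(pVCpu);
 */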
287
288
289/**
290 * Initializes the decoder state.
291 *
292 * iemReInitDecoder is mostly a copy of this function.
293 *
294 * @param pVCpu The cross context virtual CPU structure of the
295 * calling thread.
296 * @param fExecOpts Optional execution flags:
297 * - IEM_F_BYPASS_HANDLERS
298 * - IEM_F_X86_DISREGARD_LOCK
299 */
300DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
301{
302 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
303 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
312
313 /* Execution state: */
314 uint32_t fExec;
315 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
316
317 /* Decoder state: */
318 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
319 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
320 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
321 {
322 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
323 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
324 }
325 else
326 {
327 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
328 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
329 }
330 pVCpu->iem.s.fPrefixes = 0;
331 pVCpu->iem.s.uRexReg = 0;
332 pVCpu->iem.s.uRexB = 0;
333 pVCpu->iem.s.uRexIndex = 0;
334 pVCpu->iem.s.idxPrefix = 0;
335 pVCpu->iem.s.uVex3rdReg = 0;
336 pVCpu->iem.s.uVexLength = 0;
337 pVCpu->iem.s.fEvexStuff = 0;
338 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
339#ifdef IEM_WITH_CODE_TLB
340 pVCpu->iem.s.pbInstrBuf = NULL;
341 pVCpu->iem.s.offInstrNextByte = 0;
342 pVCpu->iem.s.offCurInstrStart = 0;
343# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
344 pVCpu->iem.s.offOpcode = 0;
345# endif
346# ifdef VBOX_STRICT
347 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
348 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
349 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
350 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
351# endif
352#else
353 pVCpu->iem.s.offOpcode = 0;
354 pVCpu->iem.s.cbOpcode = 0;
355#endif
356 pVCpu->iem.s.offModRm = 0;
357 pVCpu->iem.s.cActiveMappings = 0;
358 pVCpu->iem.s.iNextMapping = 0;
359 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
360
361#ifdef DBGFTRACE_ENABLED
362 switch (IEM_GET_CPU_MODE(pVCpu))
363 {
364 case IEMMODE_64BIT:
365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
366 break;
367 case IEMMODE_32BIT:
368 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
369 break;
370 case IEMMODE_16BIT:
371 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
372 break;
373 }
374#endif
375}
376
377
378/**
379 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
380 *
381 * This is mostly a copy of iemInitDecoder.
382 *
383 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
384 */
385DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
386{
387 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
396
397 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
398 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
399 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
400
401 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
402 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
403 pVCpu->iem.s.enmEffAddrMode = enmMode;
404 if (enmMode != IEMMODE_64BIT)
405 {
406 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
407 pVCpu->iem.s.enmEffOpSize = enmMode;
408 }
409 else
410 {
411 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
412 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
413 }
414 pVCpu->iem.s.fPrefixes = 0;
415 pVCpu->iem.s.uRexReg = 0;
416 pVCpu->iem.s.uRexB = 0;
417 pVCpu->iem.s.uRexIndex = 0;
418 pVCpu->iem.s.idxPrefix = 0;
419 pVCpu->iem.s.uVex3rdReg = 0;
420 pVCpu->iem.s.uVexLength = 0;
421 pVCpu->iem.s.fEvexStuff = 0;
422 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
423#ifdef IEM_WITH_CODE_TLB
424 if (pVCpu->iem.s.pbInstrBuf)
425 {
426 uint64_t off = (enmMode == IEMMODE_64BIT
427 ? pVCpu->cpum.GstCtx.rip
428 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
429 - pVCpu->iem.s.uInstrBufPc;
430 if (off < pVCpu->iem.s.cbInstrBufTotal)
431 {
432 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
433 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
434 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
435 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
436 else
437 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
438 }
439 else
440 {
441 pVCpu->iem.s.pbInstrBuf = NULL;
442 pVCpu->iem.s.offInstrNextByte = 0;
443 pVCpu->iem.s.offCurInstrStart = 0;
444 pVCpu->iem.s.cbInstrBuf = 0;
445 pVCpu->iem.s.cbInstrBufTotal = 0;
446 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
447 }
448 }
449 else
450 {
451 pVCpu->iem.s.offInstrNextByte = 0;
452 pVCpu->iem.s.offCurInstrStart = 0;
453 pVCpu->iem.s.cbInstrBuf = 0;
454 pVCpu->iem.s.cbInstrBufTotal = 0;
455# ifdef VBOX_STRICT
456 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
457# endif
458 }
459# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
460 pVCpu->iem.s.offOpcode = 0;
461# endif
462#else /* !IEM_WITH_CODE_TLB */
463 pVCpu->iem.s.cbOpcode = 0;
464 pVCpu->iem.s.offOpcode = 0;
465#endif /* !IEM_WITH_CODE_TLB */
466 pVCpu->iem.s.offModRm = 0;
467 Assert(pVCpu->iem.s.cActiveMappings == 0);
468 pVCpu->iem.s.iNextMapping = 0;
469 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
470 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
471
472#ifdef DBGFTRACE_ENABLED
473 switch (enmMode)
474 {
475 case IEMMODE_64BIT:
476 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
477 break;
478 case IEMMODE_32BIT:
479 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
480 break;
481 case IEMMODE_16BIT:
482 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
483 break;
484 }
485#endif
486}
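/* Note that the IEM_WITH_CODE_TLB path above deliberately keeps pbInstrBuf when
   the new instruction pointer still lands inside the mapped page
   (off < cbInstrBufTotal) and merely recomputes the offsets; otherwise the
   buffer is dropped and the next opcode fetch has to go through
   iemOpcodeFetchBytesJmp again. */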
487
488
489
490/**
491 * Prefetches opcodes the first time when starting execution.
492 *
493 * @returns Strict VBox status code.
494 * @param pVCpu The cross context virtual CPU structure of the
495 * calling thread.
496 * @param fExecOpts Optional execution flags:
497 * - IEM_F_BYPASS_HANDLERS
498 * - IEM_F_X86_DISREGARD_LOCK
499 */
500static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
501{
502 iemInitDecoder(pVCpu, fExecOpts);
503
504#ifndef IEM_WITH_CODE_TLB
505 /*
506 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
507 *
508 * First translate CS:rIP to a physical address.
509 *
510 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
511 * all relevant bytes from the first page, as it ASSUMES it's only ever
512 * called for dealing with CS.LIM, page crossing and instructions that
513 * are too long.
514 */
515 uint32_t cbToTryRead;
516 RTGCPTR GCPtrPC;
517 if (IEM_IS_64BIT_CODE(pVCpu))
518 {
519 cbToTryRead = GUEST_PAGE_SIZE;
520 GCPtrPC = pVCpu->cpum.GstCtx.rip;
521 if (IEM_IS_CANONICAL(GCPtrPC))
522 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
523 else
524 return iemRaiseGeneralProtectionFault0(pVCpu);
525 }
526 else
527 {
528 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
529 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
530 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
531 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
532 else
533 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
534 if (cbToTryRead) { /* likely */ }
535 else /* overflowed */
536 {
537 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
538 cbToTryRead = UINT32_MAX;
539 }
540 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
541 Assert(GCPtrPC <= UINT32_MAX);
542 }
543
544 PGMPTWALKFAST WalkFast;
545 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
546 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
547 &WalkFast);
548 if (RT_SUCCESS(rc))
549 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
550 else
551 {
552 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
553# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
554/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
555 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
556 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
557 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
558# endif
559 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
560 }
561#if 0
562 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
563 else
564 {
565 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
566# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
567/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
568# error completely wrong
569 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
570 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
571# endif
572 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
573 }
574 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
575 else
576 {
577 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
578# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
579/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
580# error completely wrong.
581 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
582 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
583# endif
584 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
585 }
586#else
587 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
588 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
589#endif
590 RTGCPHYS const GCPhys = WalkFast.GCPhys;
591
592 /*
593 * Read the bytes at this address.
594 */
595 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
596 if (cbToTryRead > cbLeftOnPage)
597 cbToTryRead = cbLeftOnPage;
598 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
599 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
600
601 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
602 {
603 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
604 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
605 { /* likely */ }
606 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
607 {
608 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
609 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
610 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
611 }
612 else
613 {
614 Log((RT_SUCCESS(rcStrict)
615 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
616 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
617 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
618 return rcStrict;
619 }
620 }
621 else
622 {
623 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
624 if (RT_SUCCESS(rc))
625 { /* likely */ }
626 else
627 {
628 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
629 GCPtrPC, GCPhys, cbToTryRead, rc));
630 return rc;
631 }
632 }
633 pVCpu->iem.s.cbOpcode = cbToTryRead;
634#endif /* !IEM_WITH_CODE_TLB */
635 return VINF_SUCCESS;
636}
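/* In the non-TLB build the prefetch above is capped to the current page and to
   sizeof(pVCpu->iem.s.abOpcode); page crossings, CS.LIM trouble and overlong
   instructions are left for iemOpcodeFetchMoreBytes, as per the note at the top
   of the function. */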
637
638
639#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
640/**
641 * Helper for doing large page accounting at TLB load time.
642 */
643template<bool const a_fGlobal>
644DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)
645{
646 if (a_fGlobal)
647 pTlb->cTlbGlobalLargePageCurLoads++;
648 else
649 pTlb->cTlbNonGlobalLargePageCurLoads++;
650
651# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
652 RTGCPTR const idxBit = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + a_fGlobal;
653 ASMBitSet(pTlb->bmLargePage, idxBit);
654# endif
655
656 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
657 uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;
658 IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal
659 ? &pTlb->GlobalLargePageRange
660 : &pTlb->NonGlobalLargePageRange;
661 uTagNoRev &= ~(RTGCPTR)fMask;
662 if (uTagNoRev < pRange->uFirstTag)
663 pRange->uFirstTag = uTagNoRev;
664
665 uTagNoRev |= fMask;
666 if (uTagNoRev > pRange->uLastTag)
667 pRange->uLastTag = uTagNoRev;
668
669 RT_NOREF_PV(pVCpu);
670}
671#endif
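/* Worked example for the range tracking above, assuming 2MB large pages
   (f2MbLargePages = true): fMask = (_2M - 1) >> GUEST_PAGE_SHIFT = 0x1ff, so a
   load with uTagNoRev = 0x8731 extends the range to uFirstTag <= 0x8600 and
   uLastTag >= 0x87ff, i.e. the full 512-page span of that large page. */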
672
673
674#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
675/**
676 * Worker for iemTlbInvalidateAll.
677 */
678template<bool a_fGlobal>
679DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
680{
681 if (!a_fGlobal)
682 pTlb->cTlsFlushes++;
683 else
684 pTlb->cTlsGlobalFlushes++;
685
686 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
687 if (RT_LIKELY(pTlb->uTlbRevision != 0))
688 { /* very likely */ }
689 else
690 {
691 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
692 pTlb->cTlbRevisionRollovers++;
693 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
694 while (i-- > 0)
695 pTlb->aEntries[i * 2].uTag = 0;
696 }
697
698 pTlb->cTlbNonGlobalLargePageCurLoads = 0;
699 pTlb->NonGlobalLargePageRange.uLastTag = 0;
700 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
701
702 if (a_fGlobal)
703 {
704 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
705 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
706 { /* very likely */ }
707 else
708 {
709 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
710 pTlb->cTlbRevisionRollovers++;
711 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
712 while (i-- > 0)
713 pTlb->aEntries[i * 2 + 1].uTag = 0;
714 }
715
716 pTlb->cTlbGlobalLargePageCurLoads = 0;
717 pTlb->GlobalLargePageRange.uLastTag = 0;
718 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
719 }
720}
721#endif
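/* The revision scheme above makes a full flush O(1): an entry only counts as a
   hit when its uTag equals (IEMTLB_CALC_TAG_NO_REV(GCPtr) | uTlbRevision[Global]),
   so bumping the revision implicitly invalidates every entry of that class; the
   tag-zeroing sweep is only needed when the revision counter wraps around. */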
722
723
724/**
725 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
726 */
727template<bool a_fGlobal>
728DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
729{
730#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
731 Log10(("IEMTlbInvalidateAll\n"));
732
733# ifdef IEM_WITH_CODE_TLB
734 pVCpu->iem.s.cbInstrBufTotal = 0;
735 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
736 if (a_fGlobal)
737 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
738 else
739 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
740# endif
741
742# ifdef IEM_WITH_DATA_TLB
743 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
744 if (a_fGlobal)
745 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
746 else
747 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
748# endif
749#else
750 RT_NOREF(pVCpu);
751#endif
752}
753
754
755/**
756 * Invalidates the non-global IEM TLB entries.
757 *
758 * This is called internally as well as by PGM when moving GC mappings.
759 *
760 * @param pVCpu The cross context virtual CPU structure of the calling
761 * thread.
762 */
763VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
764{
765 iemTlbInvalidateAll<false>(pVCpu);
766}
767
768
769/**
770 * Invalidates all the IEM TLB entries.
771 *
772 * This is called internally as well as by PGM when moving GC mappings.
773 *
774 * @param pVCpu The cross context virtual CPU structure of the calling
775 * thread.
776 */
777VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
778{
779 iemTlbInvalidateAll<true>(pVCpu);
780}
781
782
783#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
784
785/** @todo graduate this to cdefs.h or asm-mem.h. */
786# ifdef RT_ARCH_ARM64 /** @todo RT_CACHELINE_SIZE is wrong for M1 */
787# undef RT_CACHELINE_SIZE
788# define RT_CACHELINE_SIZE 128
789# endif
790
791# if defined(_MM_HINT_T0) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
792# define MY_PREFETCH(a_pvAddr) _mm_prefetch((const char *)(a_pvAddr), _MM_HINT_T0)
793# elif defined(_MSC_VER) && (defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32))
794# define MY_PREFETCH(a_pvAddr) __prefetch((a_pvAddr))
795# elif defined(__GNUC__) || RT_CLANG_HAS_FEATURE(__builtin_prefetch)
796# define MY_PREFETCH(a_pvAddr) __builtin_prefetch((a_pvAddr), 0 /*rw*/, 3 /*locality*/)
797# else
798# define MY_PREFETCH(a_pvAddr) ((void)0)
799# endif
800# if 0
801# undef MY_PREFETCH
802# define MY_PREFETCH(a_pvAddr) ((void)0)
803# endif
804
805/** @def MY_PREFETCH_64
806 * 64 byte prefetch hint, could be more depending on cache line size. */
807/** @def MY_PREFETCH_128
808 * 128 byte prefetch hint. */
809/** @def MY_PREFETCH_256
810 * 256 byte prefetch hint. */
811# if RT_CACHELINE_SIZE >= 128
812 /* 128 byte cache lines */
813# define MY_PREFETCH_64(a_pvAddr) MY_PREFETCH(a_pvAddr)
814# define MY_PREFETCH_128(a_pvAddr) MY_PREFETCH(a_pvAddr)
815# define MY_PREFETCH_256(a_pvAddr) do { \
816 MY_PREFETCH(a_pvAddr); \
817 MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \
818 } while (0)
819# else
820 /* 64 byte cache lines */
821# define MY_PREFETCH_64(a_pvAddr) MY_PREFETCH(a_pvAddr)
822# define MY_PREFETCH_128(a_pvAddr) do { \
823 MY_PREFETCH(a_pvAddr); \
824 MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \
825 } while (0)
826# define MY_PREFETCH_256(a_pvAddr) do { \
827 MY_PREFETCH(a_pvAddr); \
828 MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \
829 MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \
830 MY_PREFETCH((uint8_t const *)a_pvAddr + 192); \
831 } while (0)
832# endif
833
834template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>
835DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,
836 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT
837{
838 IEMTLBTRACE_LARGE_SCAN(pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb);
839 AssertCompile(IEMTLB_ENTRY_COUNT >= 16); /* prefetching + unroll assumption */
840
841 if (a_fGlobal)
842 pTlb->cTlbInvlPgLargeGlobal += 1;
843 if (a_fNonGlobal)
844 pTlb->cTlbInvlPgLargeNonGlobal += 1;
845
846 /*
847 * Set up the scan.
848 *
849 * GCPtrTagMask: A 2MB page consists of 512 4K pages, so a 256 TLB will map
850 * offset zero and offset 1MB to the same slot pair. Our GCPtrTag[Glob]
851 * values are for the range 0-1MB, i.e. slots 0-255. So, we construct a mask
852 * that folds large page offsets 1MB-2MB into the 0-1MB range.
853 *
854 * For our example with 2MB pages and a 256 entry TLB: 0xfffffffffffffeff
855 *
856 * MY_PREFETCH: Hope that prefetching 256 bytes at a time is okay for
857 * relevant host architectures.
858 */
859 /** @todo benchmark this code from the guest side. */
860 bool const fPartialScan = IEMTLB_ENTRY_COUNT > (a_f2MbLargePage ? 512 : 1024);
861#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
862 uintptr_t idxBitmap = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) / 64 : 0;
863 uintptr_t const idxBitmapEnd = fPartialScan ? idxBitmap + ((a_f2MbLargePage ? 512 : 1024) * 2) / 64
864 : IEMTLB_ENTRY_COUNT * 2 / 64;
865#else
866 uintptr_t idxEven = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;
867 MY_PREFETCH_256(&pTlb->aEntries[idxEven + !a_fNonGlobal]);
868 uintptr_t const idxEvenEnd = fPartialScan ? idxEven + ((a_f2MbLargePage ? 512 : 1024) * 2) : IEMTLB_ENTRY_COUNT * 2;
869#endif
870 RTGCPTR const GCPtrTagMask = fPartialScan ? ~(RTGCPTR)0
871 : ~(RTGCPTR)( (RT_BIT_32(a_f2MbLargePage ? 9 : 10) - 1U)
872 & ~(uint32_t)(RT_BIT_32(IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U));
873
874 /*
875 * Set cbInstrBufTotal to zero if GCPtrInstrBufPcTag is within any of the tag ranges.
876 * We make ASSUMPTIONS about IEMTLB_CALC_TAG_NO_REV here.
877 */
878 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
879 if ( !a_fDataTlb
880 && GCPtrInstrBufPcTag - GCPtrTag < (a_f2MbLargePage ? 512U : 1024U))
881 pVCpu->iem.s.cbInstrBufTotal = 0;
882
883 /*
884 * Combine TAG values with the TLB revisions.
885 */
886 RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;
887 if (a_fNonGlobal)
888 GCPtrTag |= pTlb->uTlbRevision;
889
890 /*
891 * Do the scanning.
892 */
893#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
894 uint64_t const bmMask = a_fGlobal && a_fNonGlobal ? UINT64_MAX
895 : a_fGlobal ? UINT64_C(0xaaaaaaaaaaaaaaaa) : UINT64_C(0x5555555555555555);
896 /* Scan bitmap entries (64 bits at a time): */
897 for (;;)
898 {
899# if 1
900 uint64_t bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;
901 if (bmEntry)
902 {
903 /* Scan the non-zero 64-bit value in groups of 8 bits: */
904 uint64_t bmToClear = 0;
905 uintptr_t idxEven = idxBitmap * 64;
906 uint32_t idxTag = 0;
907 for (;;)
908 {
909 if (bmEntry & 0xff)
910 {
911# define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \
912 if (a_fNonGlobal) \
913 { \
914 if (bmEntry & a_bmNonGlobal) \
915 { \
916 Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
917 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \
918 { \
919 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \
920 pTlb->aEntries[a_idxEvenIter].GCPhys, \
921 a_idxEvenIter, a_fDataTlb); \
922 pTlb->aEntries[a_idxEvenIter].uTag = 0; \
923 bmToClearSub8 |= a_bmNonGlobal; \
924 } \
925 } \
926 else \
927 Assert( !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
928 || (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \
929 != (GCPtrTag & IEMTLB_REVISION_MASK)); \
930 } \
931 if (a_fGlobal) \
932 { \
933 if (bmEntry & a_bmGlobal) \
934 { \
935 Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
936 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \
937 { \
938 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \
939 pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
940 a_idxEvenIter + 1, a_fDataTlb); \
941 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
942 bmToClearSub8 |= a_bmGlobal; \
943 } \
944 } \
945 else \
946 Assert( !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
947 || (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \
948 != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \
949 }
950 uint64_t bmToClearSub8 = 0;
951 ONE_PAIR(idxTag + 0, idxEven + 0, 0x01, 0x02)
952 ONE_PAIR(idxTag + 1, idxEven + 2, 0x04, 0x08)
953 ONE_PAIR(idxTag + 2, idxEven + 4, 0x10, 0x20)
954 ONE_PAIR(idxTag + 3, idxEven + 6, 0x40, 0x80)
955 bmToClear |= bmToClearSub8 << (idxTag * 2);
956# undef ONE_PAIR
957 }
958
959 /* advance to the next 8 bits. */
960 bmEntry >>= 8;
961 if (!bmEntry)
962 break;
963 idxEven += 8;
964 idxTag += 4;
965 }
966
967 /* Clear the large page flags we covered. */
968 pTlb->bmLargePage[idxBitmap] &= ~bmToClear;
969 }
970# else
971 uint64_t const bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;
972 if (bmEntry)
973 {
974 /* Scan the non-zero 64-bit value completely unrolled: */
975 uintptr_t const idxEven = idxBitmap * 64;
976 uint64_t bmToClear = 0;
977# define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \
978 if (a_fNonGlobal) \
979 { \
980 if (bmEntry & a_bmNonGlobal) \
981 { \
982 Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
983 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \
984 { \
985 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \
986 pTlb->aEntries[a_idxEvenIter].GCPhys, \
987 a_idxEvenIter, a_fDataTlb); \
988 pTlb->aEntries[a_idxEvenIter].uTag = 0; \
989 bmToClear |= a_bmNonGlobal; \
990 } \
991 } \
992 else \
993 Assert( !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
994 || (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \
995 != (GCPtrTag & IEMTLB_REVISION_MASK)); \
996 } \
997 if (a_fGlobal) \
998 { \
999 if (bmEntry & a_bmGlobal) \
1000 { \
1001 Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
1002 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \
1003 { \
1004 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \
1005 pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
1006 a_idxEvenIter + 1, a_fDataTlb); \
1007 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
1008 bmToClear |= a_bmGlobal; \
1009 } \
1010 } \
1011 else \
1012 Assert( !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
1013 || (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \
1014 != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \
1015 } ((void)0)
1016# define FOUR_PAIRS(a_iByte, a_cShift) \
1017 ONE_PAIR(0 + a_iByte * 4, idxEven + 0 + a_iByte * 8, UINT64_C(0x01) << a_cShift, UINT64_C(0x02) << a_cShift); \
1018 ONE_PAIR(1 + a_iByte * 4, idxEven + 2 + a_iByte * 8, UINT64_C(0x04) << a_cShift, UINT64_C(0x08) << a_cShift); \
1019 ONE_PAIR(2 + a_iByte * 4, idxEven + 4 + a_iByte * 8, UINT64_C(0x10) << a_cShift, UINT64_C(0x20) << a_cShift); \
1020 ONE_PAIR(3 + a_iByte * 4, idxEven + 6 + a_iByte * 8, UINT64_C(0x40) << a_cShift, UINT64_C(0x80) << a_cShift)
1021 if (bmEntry & (uint32_t)UINT16_MAX)
1022 {
1023 FOUR_PAIRS(0, 0);
1024 FOUR_PAIRS(1, 8);
1025 }
1026 if (bmEntry & ((uint32_t)UINT16_MAX << 16))
1027 {
1028 FOUR_PAIRS(2, 16);
1029 FOUR_PAIRS(3, 24);
1030 }
1031 if (bmEntry & ((uint64_t)UINT16_MAX << 32))
1032 {
1033 FOUR_PAIRS(4, 32);
1034 FOUR_PAIRS(5, 40);
1035 }
1036 if (bmEntry & ((uint64_t)UINT16_MAX << 48))
1037 {
1038 FOUR_PAIRS(6, 48);
1039 FOUR_PAIRS(7, 56);
1040 }
1041# undef FOUR_PAIRS
1042
1043 /* Clear the large page flags we covered. */
1044 pTlb->bmLargePage[idxBitmap] &= ~bmToClear;
1045 }
1046# endif
1047
1048 /* advance */
1049 idxBitmap++;
1050 if (idxBitmap >= idxBitmapEnd)
1051 break;
1052 if (a_fNonGlobal)
1053 GCPtrTag += 32;
1054 if (a_fGlobal)
1055 GCPtrTagGlob += 32;
1056 }
1057
1058#else /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */
1059
1060 for (; idxEven < idxEvenEnd; idxEven += 8)
1061 {
1062# define ONE_ITERATION(a_idxEvenIter) \
1063 if (a_fNonGlobal) \
1064 { \
1065 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == GCPtrTag) \
1066 { \
1067 if (pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
1068 { \
1069 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter].GCPhys, \
1070 a_idxEvenIter, a_fDataTlb); \
1071 pTlb->aEntries[a_idxEvenIter].uTag = 0; \
1072 } \
1073 } \
1074 GCPtrTag++; \
1075 } \
1076 \
1077 if (a_fGlobal) \
1078 { \
1079 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == GCPtrTagGlob) \
1080 { \
1081 if (pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
1082 { \
1083 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
1084 a_idxEvenIter + 1, a_fDataTlb); \
1085 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
1086 } \
1087 } \
1088 GCPtrTagGlob++; \
1089 }
1090 if (idxEven < idxEvenEnd - 4)
1091 MY_PREFETCH_256(&pTlb->aEntries[idxEven + 8 + !a_fNonGlobal]);
1092 ONE_ITERATION(idxEven)
1093 ONE_ITERATION(idxEven + 2)
1094 ONE_ITERATION(idxEven + 4)
1095 ONE_ITERATION(idxEven + 6)
1096# undef ONE_ITERATION
1097 }
1098#endif /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */
1099}
1100
1101template<bool const a_fDataTlb, bool const a_f2MbLargePage>
1102DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,
1103 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT
1104{
1105 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
1106
1107 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);
1108 if ( GCPtrTag >= pTlb->GlobalLargePageRange.uFirstTag
1109 && GCPtrTag <= pTlb->GlobalLargePageRange.uLastTag)
1110 {
1111 if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
1112 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
1113 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
1114 else
1115 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
1116 }
1117 else if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
1118 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
1119 {
1120 /* Large pages aren't as likely in the non-global TLB half. */
1121 IEMTLBTRACE_LARGE_SCAN(pVCpu, false, false, a_fDataTlb);
1122 }
1123 else
1124 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
1125}
1126
1127template<bool const a_fDataTlb>
1128DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven) RT_NOEXCEPT
1129{
1130 pTlb->cTlbInvlPg += 1;
1131
1132 /*
1133 * Flush the entry pair.
1134 */
1135 if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
1136 {
1137 IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven].GCPhys, idxEven, a_fDataTlb);
1138 pTlb->aEntries[idxEven].uTag = 0;
1139 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
1140 pVCpu->iem.s.cbInstrBufTotal = 0;
1141 }
1142 if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
1143 {
1144 IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven + 1].GCPhys, idxEven + 1, a_fDataTlb);
1145 pTlb->aEntries[idxEven + 1].uTag = 0;
1146 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
1147 pVCpu->iem.s.cbInstrBufTotal = 0;
1148 }
1149
1150 /*
1151 * If there are (or have been) large pages in the TLB, we must check if the
1152 * address being flushed may involve one of those, as then we'd have to
1153 * scan for entries relating to the same page and flush those as well.
1154 */
1155# if 0 /** @todo do accurate counts of currently loaded large pages so we can use those */
1156 if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)
1157# else
1158 if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)
1159# endif
1160 {
1161 RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);
1162 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1163 iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
1164 else
1165 iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
1166 }
1167}
1168
1169#endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */
1170
1171/**
1172 * Invalidates a page in the TLBs.
1173 *
1174 * @param pVCpu The cross context virtual CPU structure of the calling
1175 * thread.
1176 * @param GCPtr The address of the page to invalidate
1177 * @thread EMT(pVCpu)
1178 */
1179VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1180{
1181 IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
1182#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1183 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
1184 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
1185 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
1186 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
1187
1188# ifdef IEM_WITH_CODE_TLB
1189 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
1190# endif
1191# ifdef IEM_WITH_DATA_TLB
1192 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
1193# endif
1194#else
1195 NOREF(pVCpu); NOREF(GCPtr);
1196#endif
1197}
1198
1199
1200#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1201/**
1202 * Invalidates both TLBs the slow way following a physical revision rollover.
1203 *
1204 * Worker for IEMTlbInvalidateAllPhysical,
1205 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
1206 * iemMemMapJmp and others.
1207 *
1208 * @thread EMT(pVCpu)
1209 */
1210static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
1211{
1212 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
1213 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
1214 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
1215
1216 unsigned i;
1217# ifdef IEM_WITH_CODE_TLB
1218 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1219 while (i-- > 0)
1220 {
1221 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1222 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
1223 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
1224 }
1225 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
1226 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1227# endif
1228# ifdef IEM_WITH_DATA_TLB
1229 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1230 while (i-- > 0)
1231 {
1232 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1233 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
1234 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
1235 }
1236 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
1237 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1238# endif
1239
1240}
1241#endif
1242
1243
1244/**
1245 * Invalidates the host physical aspects of the IEM TLBs.
1246 *
1247 * This is called internally as well as by PGM when moving GC mappings.
1248 *
1249 * @param pVCpu The cross context virtual CPU structure of the calling
1250 * thread.
1251 * @note Currently not used.
1252 */
1253VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1254{
1255#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1256 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1257 Log10(("IEMTlbInvalidateAllPhysical\n"));
1258
1259# ifdef IEM_WITH_CODE_TLB
1260 pVCpu->iem.s.cbInstrBufTotal = 0;
1261# endif
1262 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1263 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
1264 {
1265 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1266 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1267 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1268 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1269 }
1270 else
1271 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1272#else
1273 NOREF(pVCpu);
1274#endif
1275}
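/* Same revision trick as for the virtual tags: the IEMTLBE_F_PHYS_REV bits
   cached in each entry's fFlagsAndPhysRev no longer match the bumped
   uTlbPhysRev, so mapping pointers and physical page flags are re-resolved
   lazily via PGMPhysIemGCPhys2PtrNoLock on the next use; the slow sweep only
   runs when the physical revision is about to wrap. */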
1276
1277
1278/**
1279 * Invalidates the host physical aspects of the IEM TLBs.
1280 *
1281 * This is called internally as well as by PGM when moving GC mappings.
1282 *
1283 * @param pVM The cross context VM structure.
1284 * @param idCpuCaller The ID of the calling EMT if available to the caller,
1285 * otherwise NIL_VMCPUID.
1286 * @param enmReason The reason we're called.
1287 *
1288 * @remarks Caller holds the PGM lock.
1289 */
1290VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
1291{
1292#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1293 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
1294 if (pVCpuCaller)
1295 VMCPU_ASSERT_EMT(pVCpuCaller);
1296 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
1297
1298 VMCC_FOR_EACH_VMCPU(pVM)
1299 {
1300# ifdef IEM_WITH_CODE_TLB
1301 if (pVCpuCaller == pVCpu)
1302 pVCpu->iem.s.cbInstrBufTotal = 0;
1303# endif
1304
1305 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
1306 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
1307 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
1308 { /* likely */}
1309 else if (pVCpuCaller != pVCpu)
1310 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
1311 else
1312 {
1313 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1314 continue;
1315 }
1316 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1317 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1318
1319 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1320 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1321 }
1322 VMCC_FOR_EACH_VMCPU_END(pVM);
1323
1324#else
1325 RT_NOREF(pVM, idCpuCaller, enmReason);
1326#endif
1327}
1328
1329
1330/**
1331 * Flushes the prefetch buffer, light version.
1332 */
1333void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
1334{
1335#ifndef IEM_WITH_CODE_TLB
1336 pVCpu->iem.s.cbOpcode = cbInstr;
1337#else
1338 RT_NOREF(pVCpu, cbInstr);
1339#endif
1340}
1341
1342
1343/**
1344 * Flushes the prefetch buffer, heavy version.
1345 */
1346void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
1347{
1348#ifndef IEM_WITH_CODE_TLB
1349 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
1350#elif 1
1351 pVCpu->iem.s.cbInstrBufTotal = 0;
1352 RT_NOREF(cbInstr);
1353#else
1354 RT_NOREF(pVCpu, cbInstr);
1355#endif
1356}
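/* In IEM_WITH_CODE_TLB builds the light flush is a no-op and only the heavy
   flush drops the prefetch window (cbInstrBufTotal = 0); without the code TLB
   both variants simply trim cbOpcode down to the current instruction length. */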
1357
1358
1359
1360#ifdef IEM_WITH_CODE_TLB
1361
1362/**
1363 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1364 * failure and jumping.
1365 *
1366 * We end up here for a number of reasons:
1367 * - pbInstrBuf isn't yet initialized.
1368 * - Advancing beyond the buffer boundary (e.g. cross page).
1369 * - Advancing beyond the CS segment limit.
1370 * - Fetching from non-mappable page (e.g. MMIO).
1371 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
1372 *
1373 * @param pVCpu The cross context virtual CPU structure of the
1374 * calling thread.
1375 * @param pvDst Where to return the bytes.
1376 * @param cbDst Number of bytes to read. A value of zero is
1377 * allowed for initializing pbInstrBuf (the
1378 * recompiler does this). In this case it is best
1379 * to set pbInstrBuf to NULL prior to the call.
1380 */
1381void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
1382{
1383# ifdef IN_RING3
1384 for (;;)
1385 {
1386 Assert(cbDst <= 8);
1387 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1388
1389 /*
1390 * We might have a partial buffer match, deal with that first to make the
1391 * rest simpler. This is the first part of the cross page/buffer case.
1392 */
1393 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
1394 if (pbInstrBuf != NULL)
1395 {
1396 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
1397 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
1398 if (offBuf < cbInstrBuf)
1399 {
1400 Assert(offBuf + cbDst > cbInstrBuf);
1401 uint32_t const cbCopy = cbInstrBuf - offBuf;
1402 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
1403
1404 cbDst -= cbCopy;
1405 pvDst = (uint8_t *)pvDst + cbCopy;
1406 offBuf += cbCopy;
1407 }
1408 }
1409
1410 /*
1411 * Check segment limit, figuring how much we're allowed to access at this point.
1412 *
1413 * We will fault immediately if RIP is past the segment limit / in non-canonical
1414 * territory. If we do continue, there are one or more bytes to read before we
1415 * end up in trouble and we need to do that first before faulting.
1416 */
1417 RTGCPTR GCPtrFirst;
1418 uint32_t cbMaxRead;
1419 if (IEM_IS_64BIT_CODE(pVCpu))
1420 {
1421 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1422 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1423 { /* likely */ }
1424 else
1425 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1426 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1427 }
1428 else
1429 {
1430 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1431 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1432 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1433 { /* likely */ }
1434 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
1435 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1436 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1437 if (cbMaxRead != 0)
1438 { /* likely */ }
1439 else
1440 {
1441 /* Overflowed because address is 0 and limit is max. */
1442 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1443 cbMaxRead = X86_PAGE_SIZE;
1444 }
1445 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1446 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1447 if (cbMaxRead2 < cbMaxRead)
1448 cbMaxRead = cbMaxRead2;
1449 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1450 }
1451
1452 /*
1453 * Get the TLB entry for this piece of code.
1454 */
1455 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
1456 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
1457 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
1458 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
1459 {
1460 /* likely when executing lots of code, otherwise unlikely */
1461# ifdef IEM_WITH_TLB_STATISTICS
1462 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
1463# endif
1464 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1465
1466 /* Check TLB page table level access flags. */
1467 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1468 {
1469 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
1470 {
1471 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1472 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1473 }
1474 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1475 {
1476 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1477 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1478 }
1479 }
1480
1481 /* Look up the physical page info if necessary. */
1482 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1483 { /* not necessary */ }
1484 else
1485 {
1486 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1487 { /* likely */ }
1488 else
1489 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1490 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1491 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1492 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1493 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1494 }
1495 }
1496 else
1497 {
1498 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
1499
1500 /* This page table walking will set A bits as required by the access while performing the walk.
1501 ASSUMES these are set when the address is translated rather than on commit... */
1502 /** @todo testcase: check when A bits are actually set by the CPU for code. */
1503 PGMPTWALKFAST WalkFast;
1504 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
1505 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1506 &WalkFast);
1507 if (RT_SUCCESS(rc))
1508 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1509 else
1510 {
1511# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1512 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
1513 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
1514# endif
1515 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1516 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
1517 }
1518
1519 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1520 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
1521 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
1522 {
1523 pTlbe--;
1524 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
1525 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1526 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1527# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
1528 else
1529 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
1530# endif
1531 }
1532 else
1533 {
1534 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
1535 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
1536 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1537 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1538# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
1539 else
1540 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
1541# endif
1542 }
1543 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1544 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
1545 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
1546 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1547 pTlbe->GCPhys = GCPhysPg;
1548 pTlbe->pbMappingR3 = NULL;
1549 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1550 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1551 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1552
1553 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
1554 IEMTLBTRACE_LOAD( pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
1555 else
1556 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
1557
1558 /* Resolve the physical address. */
1559 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1560 { /* likely */ }
1561 else
1562 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1563 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1564 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1565 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1566 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1567 }
1568
1569# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1570 /*
1571 * Try do a direct read using the pbMappingR3 pointer.
1572 * Note! Do not recheck the physical TLB revision number here as we have the
1573 * wrong response to changes in the else case. If someone is updating
1574 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1575 * pretending we always won the race.
1576 */
1577 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1578 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1579 {
1580 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1581 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1582 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1583 {
1584 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1585 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1586 }
1587 else
1588 {
1589 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1590 if (cbInstr + (uint32_t)cbDst <= 15)
1591 {
1592 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1593 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1594 }
1595 else
1596 {
1597 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1598 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1599 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1600 }
1601 }
1602 if (cbDst <= cbMaxRead)
1603 {
1604 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1605# if 0 /* unused */
1606 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1607# endif
1608 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1609 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1610 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1611 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1612 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1613 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1614 else
1615 Assert(!pvDst);
1616 return;
1617 }
1618 pVCpu->iem.s.pbInstrBuf = NULL;
1619
1620 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1621 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1622 }
1623# else
1624# error "refactor as needed"
1625 /*
1626     * If there is no special read handling, we can read a bit more and
1627 * put it in the prefetch buffer.
1628 */
1629 if ( cbDst < cbMaxRead
1630 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1631 {
1632 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1633 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1634 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1635 { /* likely */ }
1636 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1637 {
1638 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1639 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1640 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1641                AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1642 }
1643 else
1644 {
1645 Log((RT_SUCCESS(rcStrict)
1646 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1647 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1648 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1649 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1650 }
1651 }
1652# endif
1653 /*
1654 * Special read handling, so only read exactly what's needed.
1655 * This is a highly unlikely scenario.
1656 */
1657 else
1658 {
1659 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
1660
1661 /* Check instruction length. */
1662 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1663 if (RT_LIKELY(cbInstr + cbDst <= 15))
1664 { /* likely */ }
1665 else
1666 {
1667 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1668 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1669 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1670 }
1671
1672 /* Do the reading. */
1673 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1674 if (cbToRead > 0)
1675 {
1676 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1677 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1678 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1679 { /* likely */ }
1680 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1681 {
1682 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1683 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1684 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1685 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1686 }
1687 else
1688 {
1689 Log((RT_SUCCESS(rcStrict)
1690 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1691 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1692 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1693 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1694 }
1695 }
1696
1697 /* Update the state and probably return. */
1698 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1699 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1700# if 0 /* unused */
1701 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1702# endif
1703 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1704 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1705 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1706 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1707 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1708 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1709 pVCpu->iem.s.pbInstrBuf = NULL;
1710 if (cbToRead == cbDst)
1711 return;
1712 Assert(cbToRead == cbMaxRead);
1713 }
1714
1715 /*
1716 * More to read, loop.
1717 */
1718 cbDst -= cbMaxRead;
1719 pvDst = (uint8_t *)pvDst + cbMaxRead;
1720 }
1721# else /* !IN_RING3 */
1722 RT_NOREF(pvDst, cbDst);
1723 if (pvDst || cbDst)
1724 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1725# endif /* !IN_RING3 */
1726}
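
/*
 * Example (illustrative only, not built): the page-boundary clamp used by the
 * code TLB fetcher above.  The helper name and the sample address are made up;
 * X86_PAGE_SIZE / X86_PAGE_OFFSET_MASK are assumed to be 0x1000 / 0xfff.
 */
# if 0
static uint32_t iemExampleMaxOpcodeReadOnPage(uint64_t GCPtrFirst)
{
    /* Bytes left on the current 4 KiB page; an instruction that crosses the
       boundary is completed by a second fetch round for the remainder. */
    return 0x1000 /*X86_PAGE_SIZE*/ - ((uint32_t)GCPtrFirst & 0xfff /*X86_PAGE_OFFSET_MASK*/);
}
/* iemExampleMaxOpcodeReadOnPage(0x401ffd) -> 3, so a 5 byte instruction at
   that address is read as 3 bytes + 2 bytes from the next page. */
# endif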
1727
1728#else /* !IEM_WITH_CODE_TLB */
1729
1730/**
1731 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1732 * exception if it fails.
1733 *
1734 * @returns Strict VBox status code.
1735 * @param pVCpu The cross context virtual CPU structure of the
1736 * calling thread.
1737 * @param cbMin The minimum number of bytes relative to offOpcode
1738 * that must be read.
1739 */
1740VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1741{
1742 /*
1743 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1744 *
1745 * First translate CS:rIP to a physical address.
1746 */
1747 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1748 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1749 uint8_t const cbLeft = cbOpcode - offOpcode;
1750 Assert(cbLeft < cbMin);
1751 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1752
1753 uint32_t cbToTryRead;
1754 RTGCPTR GCPtrNext;
1755 if (IEM_IS_64BIT_CODE(pVCpu))
1756 {
1757 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1758 if (!IEM_IS_CANONICAL(GCPtrNext))
1759 return iemRaiseGeneralProtectionFault0(pVCpu);
1760 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1761 }
1762 else
1763 {
1764 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1765 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1766 GCPtrNext32 += cbOpcode;
1767 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1768 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1769 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1770 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1771 if (!cbToTryRead) /* overflowed */
1772 {
1773 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1774 cbToTryRead = UINT32_MAX;
1775 /** @todo check out wrapping around the code segment. */
1776 }
1777 if (cbToTryRead < cbMin - cbLeft)
1778 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1779 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1780
1781 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1782 if (cbToTryRead > cbLeftOnPage)
1783 cbToTryRead = cbLeftOnPage;
1784 }
1785
1786 /* Restrict to opcode buffer space.
1787
1788 We're making ASSUMPTIONS here based on work done previously in
1789 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1790 be fetched in case of an instruction crossing two pages. */
1791 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1792 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1793 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1794 { /* likely */ }
1795 else
1796 {
1797 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1798 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1799 return iemRaiseGeneralProtectionFault0(pVCpu);
1800 }
1801
1802 PGMPTWALKFAST WalkFast;
1803 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1804 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1805 &WalkFast);
1806 if (RT_SUCCESS(rc))
1807 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1808 else
1809 {
1810 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1811#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1812 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1813 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1814#endif
1815 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1816 }
1817 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1818 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1819
1820 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1821 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1822
1823 /*
1824 * Read the bytes at this address.
1825 *
1826 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1827 * and since PATM should only patch the start of an instruction there
1828 * should be no need to check again here.
1829 */
1830 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1831 {
1832 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1833 cbToTryRead, PGMACCESSORIGIN_IEM);
1834 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1835 { /* likely */ }
1836 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1837 {
1838 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1839 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1840 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1841 }
1842 else
1843 {
1844 Log((RT_SUCCESS(rcStrict)
1845 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1846 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1847 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1848 return rcStrict;
1849 }
1850 }
1851 else
1852 {
1853 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1854 if (RT_SUCCESS(rc))
1855 { /* likely */ }
1856 else
1857 {
1858 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1859 return rc;
1860 }
1861 }
1862 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1863 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1864
1865 return VINF_SUCCESS;
1866}
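
/*
 * Example (illustrative only, not built): the canonical-address rule that the
 * IEM_IS_CANONICAL check above relies on for 64-bit code.  The helper name is
 * made up; a 48-bit linear address width is assumed.
 */
#if 0
static bool iemExampleIsCanonical(uint64_t GCPtr)
{
    /* Bits 63:47 must all equal bit 47, i.e. the arithmetic shift yields 0
       (low half) or -1 (high half). */
    int64_t const iTop = (int64_t)GCPtr >> 47;
    return iTop == 0 || iTop == -1;
}
/* iemExampleIsCanonical(UINT64_C(0x00007fffffffffff)) -> true
   iemExampleIsCanonical(UINT64_C(0x0000800000000000)) -> false
   iemExampleIsCanonical(UINT64_C(0xffff800000000000)) -> true */
#endif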
1867
1868#endif /* !IEM_WITH_CODE_TLB */
1869#ifndef IEM_WITH_SETJMP
1870
1871/**
1872 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1873 *
1874 * @returns Strict VBox status code.
1875 * @param pVCpu The cross context virtual CPU structure of the
1876 * calling thread.
1877 * @param pb Where to return the opcode byte.
1878 */
1879VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1880{
1881 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1882 if (rcStrict == VINF_SUCCESS)
1883 {
1884 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1885 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1886 pVCpu->iem.s.offOpcode = offOpcode + 1;
1887 }
1888 else
1889 *pb = 0;
1890 return rcStrict;
1891}
1892
1893#else /* IEM_WITH_SETJMP */
1894
1895/**
1896 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1897 *
1898 * @returns The opcode byte.
1899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1900 */
1901uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1902{
1903# ifdef IEM_WITH_CODE_TLB
1904 uint8_t u8;
1905 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1906 return u8;
1907# else
1908 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1909 if (rcStrict == VINF_SUCCESS)
1910 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1911 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1912# endif
1913}
1914
1915#endif /* IEM_WITH_SETJMP */
1916
1917#ifndef IEM_WITH_SETJMP
1918
1919/**
1920 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1921 *
1922 * @returns Strict VBox status code.
1923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1924 * @param pu16 Where to return the opcode word.
1925 */
1926VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1927{
1928 uint8_t u8;
1929 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1930 if (rcStrict == VINF_SUCCESS)
1931 *pu16 = (int8_t)u8;
1932 return rcStrict;
1933}
1934
1935
1936/**
1937 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1938 *
1939 * @returns Strict VBox status code.
1940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1941 * @param pu32 Where to return the opcode dword.
1942 */
1943VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1944{
1945 uint8_t u8;
1946 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1947 if (rcStrict == VINF_SUCCESS)
1948 *pu32 = (int8_t)u8;
1949 return rcStrict;
1950}
1951
1952
1953/**
1954 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1955 *
1956 * @returns Strict VBox status code.
1957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1958 * @param pu64 Where to return the opcode qword.
1959 */
1960VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1961{
1962 uint8_t u8;
1963 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1964 if (rcStrict == VINF_SUCCESS)
1965 *pu64 = (int8_t)u8;
1966 return rcStrict;
1967}
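
/*
 * Example (illustrative only, not built): the sign extension performed by the
 * (int8_t) casts in the S8Sx helpers above.
 */
# if 0
static void iemExampleSignExtendOpcodeByte(void)
{
    uint8_t const  u8  = 0x80;                      /* -128 as a signed byte */
    uint16_t const u16 = (uint16_t)(int8_t)u8;      /* 0xff80 */
    uint32_t const u32 = (uint32_t)(int8_t)u8;      /* 0xffffff80 */
    uint64_t const u64 = (uint64_t)(int8_t)u8;      /* 0xffffffffffffff80 */
    NOREF(u16); NOREF(u32); NOREF(u64);             /* 0x7f would stay 0x7f in all three. */
}
# endif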
1968
1969#endif /* !IEM_WITH_SETJMP */
1970
1971
1972#ifndef IEM_WITH_SETJMP
1973
1974/**
1975 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1976 *
1977 * @returns Strict VBox status code.
1978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1979 * @param pu16 Where to return the opcode word.
1980 */
1981VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1982{
1983 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1984 if (rcStrict == VINF_SUCCESS)
1985 {
1986 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1987# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1988 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1989# else
1990 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1991# endif
1992 pVCpu->iem.s.offOpcode = offOpcode + 2;
1993 }
1994 else
1995 *pu16 = 0;
1996 return rcStrict;
1997}
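
/*
 * Example (illustrative only, not built): the little-endian assembly done by
 * the RT_MAKE_U16 path above - the byte at the lower opcode offset becomes the
 * low half of the result.  The immediate bytes are made up.
 */
# if 0
static uint16_t iemExampleDecodeU16Imm(void)
{
    uint8_t const abOpcode[2] = { 0x34, 0x12 };     /* as found in the instruction stream */
    return RT_MAKE_U16(abOpcode[0], abOpcode[1]);   /* = 0x1234 */
}
# endif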
1998
1999#else /* IEM_WITH_SETJMP */
2000
2001/**
2002 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2003 *
2004 * @returns The opcode word.
2005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2006 */
2007uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2008{
2009# ifdef IEM_WITH_CODE_TLB
2010 uint16_t u16;
2011 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2012 return u16;
2013# else
2014 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2015 if (rcStrict == VINF_SUCCESS)
2016 {
2017 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2018 pVCpu->iem.s.offOpcode += 2;
2019# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2020 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2021# else
2022 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2023# endif
2024 }
2025 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
2026# endif
2027}
2028
2029#endif /* IEM_WITH_SETJMP */
2030
2031#ifndef IEM_WITH_SETJMP
2032
2033/**
2034 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2035 *
2036 * @returns Strict VBox status code.
2037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2038 * @param pu32 Where to return the opcode double word.
2039 */
2040VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
2041{
2042 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2043 if (rcStrict == VINF_SUCCESS)
2044 {
2045 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2046 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2047 pVCpu->iem.s.offOpcode = offOpcode + 2;
2048 }
2049 else
2050 *pu32 = 0;
2051 return rcStrict;
2052}
2053
2054
2055/**
2056 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2057 *
2058 * @returns Strict VBox status code.
2059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2060 * @param pu64 Where to return the opcode quad word.
2061 */
2062VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
2063{
2064 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2065 if (rcStrict == VINF_SUCCESS)
2066 {
2067 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2068 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2069 pVCpu->iem.s.offOpcode = offOpcode + 2;
2070 }
2071 else
2072 *pu64 = 0;
2073 return rcStrict;
2074}
2075
2076#endif /* !IEM_WITH_SETJMP */
2077
2078#ifndef IEM_WITH_SETJMP
2079
2080/**
2081 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2082 *
2083 * @returns Strict VBox status code.
2084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2085 * @param pu32 Where to return the opcode dword.
2086 */
2087VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
2088{
2089 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2090 if (rcStrict == VINF_SUCCESS)
2091 {
2092 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2093# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2094 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2095# else
2096 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2097 pVCpu->iem.s.abOpcode[offOpcode + 1],
2098 pVCpu->iem.s.abOpcode[offOpcode + 2],
2099 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2100# endif
2101 pVCpu->iem.s.offOpcode = offOpcode + 4;
2102 }
2103 else
2104 *pu32 = 0;
2105 return rcStrict;
2106}
2107
2108#else /* IEM_WITH_SETJMP */
2109
2110/**
2111 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2112 *
2113 * @returns The opcode dword.
2114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2115 */
2116uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2117{
2118# ifdef IEM_WITH_CODE_TLB
2119 uint32_t u32;
2120 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2121 return u32;
2122# else
2123 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2124 if (rcStrict == VINF_SUCCESS)
2125 {
2126 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2127 pVCpu->iem.s.offOpcode = offOpcode + 4;
2128# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2129 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2130# else
2131 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2132 pVCpu->iem.s.abOpcode[offOpcode + 1],
2133 pVCpu->iem.s.abOpcode[offOpcode + 2],
2134 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2135# endif
2136 }
2137 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
2138# endif
2139}
2140
2141#endif /* IEM_WITH_SETJMP */
2142
2143#ifndef IEM_WITH_SETJMP
2144
2145/**
2146 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2147 *
2148 * @returns Strict VBox status code.
2149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2150 * @param pu64 Where to return the opcode dword, zero extended to a qword.
2151 */
2152VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
2153{
2154 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2155 if (rcStrict == VINF_SUCCESS)
2156 {
2157 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2158 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2159 pVCpu->iem.s.abOpcode[offOpcode + 1],
2160 pVCpu->iem.s.abOpcode[offOpcode + 2],
2161 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2162 pVCpu->iem.s.offOpcode = offOpcode + 4;
2163 }
2164 else
2165 *pu64 = 0;
2166 return rcStrict;
2167}
2168
2169
2170/**
2171 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2172 *
2173 * @returns Strict VBox status code.
2174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2175 * @param pu64 Where to return the opcode qword.
2176 */
2177VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
2178{
2179 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2180 if (rcStrict == VINF_SUCCESS)
2181 {
2182 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2183 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2184 pVCpu->iem.s.abOpcode[offOpcode + 1],
2185 pVCpu->iem.s.abOpcode[offOpcode + 2],
2186 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2187 pVCpu->iem.s.offOpcode = offOpcode + 4;
2188 }
2189 else
2190 *pu64 = 0;
2191 return rcStrict;
2192}
2193
2194#endif /* !IEM_WITH_SETJMP */
2195
2196#ifndef IEM_WITH_SETJMP
2197
2198/**
2199 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2200 *
2201 * @returns Strict VBox status code.
2202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2203 * @param pu64 Where to return the opcode qword.
2204 */
2205VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
2206{
2207 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2208 if (rcStrict == VINF_SUCCESS)
2209 {
2210 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2211# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2212 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2213# else
2214 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2215 pVCpu->iem.s.abOpcode[offOpcode + 1],
2216 pVCpu->iem.s.abOpcode[offOpcode + 2],
2217 pVCpu->iem.s.abOpcode[offOpcode + 3],
2218 pVCpu->iem.s.abOpcode[offOpcode + 4],
2219 pVCpu->iem.s.abOpcode[offOpcode + 5],
2220 pVCpu->iem.s.abOpcode[offOpcode + 6],
2221 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2222# endif
2223 pVCpu->iem.s.offOpcode = offOpcode + 8;
2224 }
2225 else
2226 *pu64 = 0;
2227 return rcStrict;
2228}
2229
2230#else /* IEM_WITH_SETJMP */
2231
2232/**
2233 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2234 *
2235 * @returns The opcode qword.
2236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2237 */
2238uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2239{
2240# ifdef IEM_WITH_CODE_TLB
2241 uint64_t u64;
2242 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2243 return u64;
2244# else
2245 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2246 if (rcStrict == VINF_SUCCESS)
2247 {
2248 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2249 pVCpu->iem.s.offOpcode = offOpcode + 8;
2250# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2251 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2252# else
2253 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2254 pVCpu->iem.s.abOpcode[offOpcode + 1],
2255 pVCpu->iem.s.abOpcode[offOpcode + 2],
2256 pVCpu->iem.s.abOpcode[offOpcode + 3],
2257 pVCpu->iem.s.abOpcode[offOpcode + 4],
2258 pVCpu->iem.s.abOpcode[offOpcode + 5],
2259 pVCpu->iem.s.abOpcode[offOpcode + 6],
2260 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2261# endif
2262 }
2263 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
2264# endif
2265}
2266
2267#endif /* IEM_WITH_SETJMP */
2268
2269
2270
2271/** @name Misc Worker Functions.
2272 * @{
2273 */
2274
2275/**
2276 * Gets the exception class for the specified exception vector.
2277 *
2278 * @returns The class of the specified exception.
2279 * @param uVector The exception vector.
2280 */
2281static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
2282{
2283 Assert(uVector <= X86_XCPT_LAST);
2284 switch (uVector)
2285 {
2286 case X86_XCPT_DE:
2287 case X86_XCPT_TS:
2288 case X86_XCPT_NP:
2289 case X86_XCPT_SS:
2290 case X86_XCPT_GP:
2291 case X86_XCPT_SX: /* AMD only */
2292 return IEMXCPTCLASS_CONTRIBUTORY;
2293
2294 case X86_XCPT_PF:
2295 case X86_XCPT_VE: /* Intel only */
2296 return IEMXCPTCLASS_PAGE_FAULT;
2297
2298 case X86_XCPT_DF:
2299 return IEMXCPTCLASS_DOUBLE_FAULT;
2300 }
2301 return IEMXCPTCLASS_BENIGN;
2302}
2303
2304
2305/**
2306 * Evaluates how to handle an exception caused during delivery of another event
2307 * (exception / interrupt).
2308 *
2309 * @returns How to handle the recursive exception.
2310 * @param pVCpu The cross context virtual CPU structure of the
2311 * calling thread.
2312 * @param fPrevFlags The flags of the previous event.
2313 * @param uPrevVector The vector of the previous event.
2314 * @param fCurFlags The flags of the current exception.
2315 * @param uCurVector The vector of the current exception.
2316 * @param pfXcptRaiseInfo Where to store additional information about the
2317 * exception condition. Optional.
2318 */
2319VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
2320 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
2321{
2322 /*
2323     * Only CPU exceptions can be raised while delivering other events; software interrupt
2324 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
2325 */
2326 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
2327 Assert(pVCpu); RT_NOREF(pVCpu);
2328 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
2329
2330 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
2331 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
2332 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2333 {
2334 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
2335 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
2336 {
2337 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
2338 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
2339 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
2340 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
2341 {
2342 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2343 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
2344 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
2345 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
2346 uCurVector, pVCpu->cpum.GstCtx.cr2));
2347 }
2348 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2349 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
2350 {
2351 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2352 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
2353 }
2354 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
2355 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2356 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
2357 {
2358 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
2359 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
2360 }
2361 }
2362 else
2363 {
2364 if (uPrevVector == X86_XCPT_NMI)
2365 {
2366 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
2367 if (uCurVector == X86_XCPT_PF)
2368 {
2369 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
2370 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
2371 }
2372 }
2373 else if ( uPrevVector == X86_XCPT_AC
2374 && uCurVector == X86_XCPT_AC)
2375 {
2376 enmRaise = IEMXCPTRAISE_CPU_HANG;
2377 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
2378 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
2379 }
2380 }
2381 }
2382 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
2383 {
2384 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
2385 if (uCurVector == X86_XCPT_PF)
2386 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
2387 }
2388 else
2389 {
2390 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
2391 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
2392 }
2393
2394 if (pfXcptRaiseInfo)
2395 *pfXcptRaiseInfo = fRaiseInfo;
2396 return enmRaise;
2397}
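
/*
 * Usage sketch (illustrative only, not built): how the classification above
 * plays out for a #GP raised while delivering a #PF - both fall in the page
 * fault / contributory group, so the result is a double fault.  The caller
 * context (pVCpu) is assumed.
 */
#if 0
    IEMXCPTRAISEINFO   fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    IEMXCPTRAISE const enmRaise   = IEMEvaluateRecursiveXcpt(pVCpu,
                                                             IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF, /* previous event */
                                                             IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, /* current exception */
                                                             &fRaiseInfo);
    Assert(enmRaise   == IEMXCPTRAISE_DOUBLE_FAULT);
    Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
#endif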
2398
2399
2400/**
2401 * Enters the CPU shutdown state initiated by a triple fault or other
2402 * unrecoverable condition.
2403 *
2404 * @returns Strict VBox status code.
2405 * @param pVCpu The cross context virtual CPU structure of the
2406 * calling thread.
2407 */
2408static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
2409{
2410 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2411 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
2412
2413 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
2414 {
2415 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
2416 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2417 }
2418
2419 RT_NOREF(pVCpu);
2420 return VINF_EM_TRIPLE_FAULT;
2421}
2422
2423
2424/**
2425 * Validates a new SS segment.
2426 *
2427 * @returns VBox strict status code.
2428 * @param pVCpu The cross context virtual CPU structure of the
2429 * calling thread.
2430 * @param NewSS The new SS selector.
2431 * @param uCpl The CPL to load the stack for.
2432 * @param pDesc Where to return the descriptor.
2433 */
2434static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
2435{
2436 /* Null selectors are not allowed (we're not called for dispatching
2437 interrupts with SS=0 in long mode). */
2438 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2439 {
2440 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2441 return iemRaiseTaskSwitchFault0(pVCpu);
2442 }
2443
2444 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2445 if ((NewSS & X86_SEL_RPL) != uCpl)
2446 {
2447 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2448 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2449 }
2450
2451 /*
2452 * Read the descriptor.
2453 */
2454 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2455 if (rcStrict != VINF_SUCCESS)
2456 return rcStrict;
2457
2458 /*
2459 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2460 */
2461 if (!pDesc->Legacy.Gen.u1DescType)
2462 {
2463 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2464 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2465 }
2466
2467 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2468 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2469 {
2470 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2471 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2472 }
2473 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2474 {
2475 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2476 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2477 }
2478
2479 /* Is it there? */
2480 /** @todo testcase: Is this checked before the canonical / limit check below? */
2481 if (!pDesc->Legacy.Gen.u1Present)
2482 {
2483 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2484 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2485 }
2486
2487 return VINF_SUCCESS;
2488}
2489
2490/** @} */
2491
2492
2493/** @name Raising Exceptions.
2494 *
2495 * @{
2496 */
2497
2498
2499/**
2500 * Loads the specified stack far pointer from the TSS.
2501 *
2502 * @returns VBox strict status code.
2503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2504 * @param uCpl The CPL to load the stack for.
2505 * @param pSelSS Where to return the new stack segment.
2506 * @param puEsp Where to return the new stack pointer.
2507 */
2508static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
2509{
2510 VBOXSTRICTRC rcStrict;
2511 Assert(uCpl < 4);
2512
2513 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2514 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
2515 {
2516 /*
2517 * 16-bit TSS (X86TSS16).
2518 */
2519 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2520 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2521 {
2522 uint32_t off = uCpl * 4 + 2;
2523 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2524 {
2525 /** @todo check actual access pattern here. */
2526 uint32_t u32Tmp = 0; /* gcc maybe... */
2527 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2528 if (rcStrict == VINF_SUCCESS)
2529 {
2530 *puEsp = RT_LOWORD(u32Tmp);
2531 *pSelSS = RT_HIWORD(u32Tmp);
2532 return VINF_SUCCESS;
2533 }
2534 }
2535 else
2536 {
2537 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2538 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2539 }
2540 break;
2541 }
2542
2543 /*
2544 * 32-bit TSS (X86TSS32).
2545 */
2546 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2547 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2548 {
2549 uint32_t off = uCpl * 8 + 4;
2550 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2551 {
2552/** @todo check actual access pattern here. */
2553 uint64_t u64Tmp;
2554 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2555 if (rcStrict == VINF_SUCCESS)
2556 {
2557 *puEsp = u64Tmp & UINT32_MAX;
2558 *pSelSS = (RTSEL)(u64Tmp >> 32);
2559 return VINF_SUCCESS;
2560 }
2561 }
2562 else
2563 {
2564                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2565 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2566 }
2567 break;
2568 }
2569
2570 default:
2571 AssertFailed();
2572 rcStrict = VERR_IEM_IPE_4;
2573 break;
2574 }
2575
2576 *puEsp = 0; /* make gcc happy */
2577 *pSelSS = 0; /* make gcc happy */
2578 return rcStrict;
2579}
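
/*
 * Example (illustrative only, not built): the TSS offsets the two cases above
 * read from, assuming the X86TSS16/X86TSS32 layouts in iprt/x86.h.
 */
#if 0
/* 16-bit TSS: each ring has a 4 byte {sp, ss} pair starting at offset 2. */
AssertCompile(RT_UOFFSETOF(X86TSS16, sp0)  == 0 * 4 + 2);
AssertCompile(RT_UOFFSETOF(X86TSS16, sp2)  == 2 * 4 + 2);
/* 32-bit TSS: each ring has an 8 byte {esp, ss} pair starting at offset 4. */
AssertCompile(RT_UOFFSETOF(X86TSS32, esp0) == 0 * 8 + 4);
AssertCompile(RT_UOFFSETOF(X86TSS32, esp2) == 2 * 8 + 4);
#endif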
2580
2581
2582/**
2583 * Loads the specified stack pointer from the 64-bit TSS.
2584 *
2585 * @returns VBox strict status code.
2586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2587 * @param uCpl The CPL to load the stack for.
2588 * @param uIst The interrupt stack table index; 0 means use the stack for uCpl.
2589 * @param puRsp Where to return the new stack pointer.
2590 */
2591static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2592{
2593 Assert(uCpl < 4);
2594 Assert(uIst < 8);
2595 *puRsp = 0; /* make gcc happy */
2596
2597 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2598 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2599
2600 uint32_t off;
2601 if (uIst)
2602 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2603 else
2604 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2605 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2606 {
2607 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2608 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2609 }
2610
2611 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2612}
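
/*
 * Example (illustrative only, not built): the offset math above, assuming the
 * X86TSS64 layout in iprt/x86.h where the rspN and istN fields form two
 * consecutive arrays of 64-bit entries.
 */
#if 0
AssertCompile(RT_UOFFSETOF(X86TSS64, rsp2) == RT_UOFFSETOF(X86TSS64, rsp0) + 2 * sizeof(uint64_t));
AssertCompile(RT_UOFFSETOF(X86TSS64, ist7) == RT_UOFFSETOF(X86TSS64, ist1) + 6 * sizeof(uint64_t));
/* So uCpl=2 with uIst=0 reads TSS.rsp2, while uIst=N (1..7) reads TSS.istN
   regardless of the target CPL. */
#endif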
2613
2614
2615/**
2616 * Adjust the CPU state according to the exception being raised.
2617 *
2618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2619 * @param u8Vector The exception that has been raised.
2620 */
2621DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2622{
2623 switch (u8Vector)
2624 {
2625 case X86_XCPT_DB:
2626 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2627 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2628 break;
2629 /** @todo Read the AMD and Intel exception reference... */
2630 }
2631}
2632
2633
2634/**
2635 * Implements exceptions and interrupts for real mode.
2636 *
2637 * @returns VBox strict status code.
2638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2639 * @param cbInstr The number of bytes to offset rIP by in the return
2640 * address.
2641 * @param u8Vector The interrupt / exception vector number.
2642 * @param fFlags The flags.
2643 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2644 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2645 */
2646static VBOXSTRICTRC
2647iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2648 uint8_t cbInstr,
2649 uint8_t u8Vector,
2650 uint32_t fFlags,
2651 uint16_t uErr,
2652 uint64_t uCr2) RT_NOEXCEPT
2653{
2654 NOREF(uErr); NOREF(uCr2);
2655 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2656
2657 /*
2658 * Read the IDT entry.
2659 */
2660 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2661 {
2662 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2663 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2664 }
2665 RTFAR16 Idte;
2666 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2667 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2668 {
2669 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2670 return rcStrict;
2671 }
2672
2673#ifdef LOG_ENABLED
2674    /* If this is a software interrupt, try to decode it if logging is enabled and such. */
2675 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2676 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2677 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2678#endif
2679
2680 /*
2681 * Push the stack frame.
2682 */
2683 uint8_t bUnmapInfo;
2684 uint16_t *pu16Frame;
2685 uint64_t uNewRsp;
2686 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2687 if (rcStrict != VINF_SUCCESS)
2688 return rcStrict;
2689
2690 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2691#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2692 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2693 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2694 fEfl |= UINT16_C(0xf000);
2695#endif
2696 pu16Frame[2] = (uint16_t)fEfl;
2697 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2698 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2699 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2700 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2701 return rcStrict;
2702
2703 /*
2704 * Load the vector address into cs:ip and make exception specific state
2705 * adjustments.
2706 */
2707 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2708 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2709 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2710 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2711 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2712 pVCpu->cpum.GstCtx.rip = Idte.off;
2713 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2714 IEMMISC_SET_EFL(pVCpu, fEfl);
2715
2716 /** @todo do we actually do this in real mode? */
2717 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2718 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2719
2720 /*
2721     * Deal with debug events that follow the exception and clear inhibit flags.
2722 */
2723 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2724 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2725 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2726 else
2727 {
2728 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
2729 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2730 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2731 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2732 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2733 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2734 return iemRaiseDebugException(pVCpu);
2735 }
2736
2737    /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2738 so best leave them alone in case we're in a weird kind of real mode... */
2739
2740 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2741}
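
/*
 * Example (illustrative only, not built): the real-mode IVT entry and stack
 * frame used above, for a made-up vector and a default IDTR (base 0,
 * limit 0x3ff).
 */
#if 0
    uint8_t const  u8Vector  = 0x21;
    uint32_t const GCPtrIdte = 4 * u8Vector;    /* 0x84: word 0 = new IP, word 1 = new CS. */
    /* After pushing FLAGS, CS and IP the handler sees, from the new SS:SP up:
         SP+0: return IP, SP+2: return CS, SP+4: FLAGS. */
#endif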
2742
2743
2744/**
2745 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2746 *
2747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2748 * @param pSReg Pointer to the segment register.
2749 */
2750DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2751{
2752 pSReg->Sel = 0;
2753 pSReg->ValidSel = 0;
2754 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2755 {
2756 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2757 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2758 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2759 }
2760 else
2761 {
2762 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2763 /** @todo check this on AMD-V */
2764 pSReg->u64Base = 0;
2765 pSReg->u32Limit = 0;
2766 }
2767}
2768
2769
2770/**
2771 * Loads a segment selector during a task switch in V8086 mode.
2772 *
2773 * @param pSReg Pointer to the segment register.
2774 * @param uSel The selector value to load.
2775 */
2776DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2777{
2778 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2779 pSReg->Sel = uSel;
2780 pSReg->ValidSel = uSel;
2781 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2782 pSReg->u64Base = uSel << 4;
2783 pSReg->u32Limit = 0xffff;
2784 pSReg->Attr.u = 0xf3;
2785}
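
/*
 * Example (illustrative only, not built): the address math implied by the
 * 'u64Base = uSel << 4' above.  The selector value is made up.
 */
#if 0
    uint16_t const uSel    = 0xb800;
    uint64_t const u64Base = (uint32_t)uSel << 4;   /* = 0xb8000, i.e. the VGA text buffer. */
    /* The limit is 0xffff and the 0xf3 attributes describe a present, DPL=3,
       accessed, read/write data segment. */
#endif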
2786
2787
2788/**
2789 * Loads a segment selector during a task switch in protected mode.
2790 *
2791 * In this task switch scenario, we would throw \#TS exceptions rather than
2792 * \#GPs.
2793 *
2794 * @returns VBox strict status code.
2795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2796 * @param pSReg Pointer to the segment register.
2797 * @param uSel The new selector value.
2798 *
2799 * @remarks This does _not_ handle CS or SS.
2800 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2801 */
2802static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2803{
2804 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2805
2806 /* Null data selector. */
2807 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2808 {
2809 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2810 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2811 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2812 return VINF_SUCCESS;
2813 }
2814
2815 /* Fetch the descriptor. */
2816 IEMSELDESC Desc;
2817 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2818 if (rcStrict != VINF_SUCCESS)
2819 {
2820 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2821 VBOXSTRICTRC_VAL(rcStrict)));
2822 return rcStrict;
2823 }
2824
2825 /* Must be a data segment or readable code segment. */
2826 if ( !Desc.Legacy.Gen.u1DescType
2827 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2828 {
2829 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2830 Desc.Legacy.Gen.u4Type));
2831 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2832 }
2833
2834 /* Check privileges for data segments and non-conforming code segments. */
2835 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2836 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2837 {
2838 /* The RPL and the new CPL must be less than or equal to the DPL. */
2839 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2840 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2841 {
2842 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2843 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2844 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2845 }
2846 }
2847
2848 /* Is it there? */
2849 if (!Desc.Legacy.Gen.u1Present)
2850 {
2851 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2852 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2853 }
2854
2855 /* The base and limit. */
2856 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2857 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2858
2859 /*
2860 * Ok, everything checked out fine. Now set the accessed bit before
2861 * committing the result into the registers.
2862 */
2863 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2864 {
2865 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2866 if (rcStrict != VINF_SUCCESS)
2867 return rcStrict;
2868 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2869 }
2870
2871 /* Commit */
2872 pSReg->Sel = uSel;
2873 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2874 pSReg->u32Limit = cbLimit;
2875 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2876 pSReg->ValidSel = uSel;
2877 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2878 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2879 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2880
2881 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2882 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2883 return VINF_SUCCESS;
2884}
2885
2886
2887/**
2888 * Performs a task switch.
2889 *
2890 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2891 * caller is responsible for performing the necessary checks (like DPL, TSS
2892 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2893 * reference for JMP, CALL, IRET.
2894 *
2895 * If the task switch is due to a software interrupt or hardware exception,
2896 * the caller is responsible for validating the TSS selector and descriptor. See
2897 * Intel Instruction reference for INT n.
2898 *
2899 * @returns VBox strict status code.
2900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2901 * @param enmTaskSwitch The cause of the task switch.
2902 * @param uNextEip The EIP effective after the task switch.
2903 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2904 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2905 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2906 * @param SelTss The TSS selector of the new task.
2907 * @param pNewDescTss Pointer to the new TSS descriptor.
2908 */
2909VBOXSTRICTRC
2910iemTaskSwitch(PVMCPUCC pVCpu,
2911 IEMTASKSWITCH enmTaskSwitch,
2912 uint32_t uNextEip,
2913 uint32_t fFlags,
2914 uint16_t uErr,
2915 uint64_t uCr2,
2916 RTSEL SelTss,
2917 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2918{
2919 Assert(!IEM_IS_REAL_MODE(pVCpu));
2920 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2921 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2922
2923 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2924 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2925 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2926 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2927 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2928
2929 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2930 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2931
2932 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2933 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2934
2935 /* Update CR2 in case it's a page-fault. */
2936 /** @todo This should probably be done much earlier in IEM/PGM. See
2937 * @bugref{5653#c49}. */
2938 if (fFlags & IEM_XCPT_FLAGS_CR2)
2939 pVCpu->cpum.GstCtx.cr2 = uCr2;
2940
2941 /*
2942 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2943 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2944 */
2945 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2946 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2947 if (uNewTssLimit < uNewTssLimitMin)
2948 {
2949 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2950 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2951 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2952 }
2953
2954 /*
2955 * Task switches in VMX non-root mode always cause task switches.
2956 * The new TSS must have been read and validated (DPL, limits etc.) before a
2957 * task-switch VM-exit commences.
2958 *
2959 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2960 */
2961 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2962 {
2963 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2964 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2965 }
2966
2967 /*
2968 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2969 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2970 */
2971 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2972 {
2973 uint64_t const uExitInfo1 = SelTss;
2974 uint64_t uExitInfo2 = uErr;
2975 switch (enmTaskSwitch)
2976 {
2977 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2978 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2979 default: break;
2980 }
2981 if (fFlags & IEM_XCPT_FLAGS_ERR)
2982 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2983 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2984 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2985
2986 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2987 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2988 RT_NOREF2(uExitInfo1, uExitInfo2);
2989 }
2990
2991 /*
2992     * Check the current TSS limit. The last data written to the current TSS during the
2993 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2994 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2995 *
2996     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2997 * end up with smaller than "legal" TSS limits.
2998 */
2999 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
3000 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
3001 if (uCurTssLimit < uCurTssLimitMin)
3002 {
3003 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
3004 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
3005 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
3006 }
3007
3008 /*
3009 * Verify that the new TSS can be accessed and map it. Map only the required contents
3010 * and not the entire TSS.
3011 */
3012 uint8_t bUnmapInfoNewTss;
3013 void *pvNewTss;
3014 uint32_t const cbNewTss = uNewTssLimitMin + 1;
3015 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
3016 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3017 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3018 * not perform correct translation if this happens. See Intel spec. 7.2.1
3019 * "Task-State Segment". */
3020 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
3021/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
3022 * Consider wrapping the remainder into a function for simpler cleanup. */
3023 if (rcStrict != VINF_SUCCESS)
3024 {
3025 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
3026 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
3027 return rcStrict;
3028 }
3029
3030 /*
3031 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3032 */
3033 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
3034 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3035 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3036 {
3037 uint8_t bUnmapInfoDescCurTss;
3038 PX86DESC pDescCurTss;
3039 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
3040 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
3041 if (rcStrict != VINF_SUCCESS)
3042 {
3043            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3044 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3045 return rcStrict;
3046 }
3047
3048 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3049 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
3050 if (rcStrict != VINF_SUCCESS)
3051 {
3052            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3053 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3054 return rcStrict;
3055 }
3056
3057 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3058 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3059 {
3060 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3061 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3062 fEFlags &= ~X86_EFL_NT;
3063 }
3064 }
3065
3066 /*
3067 * Save the CPU state into the current TSS.
3068 */
3069 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
3070 if (GCPtrNewTss == GCPtrCurTss)
3071 {
3072 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
3073 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3074 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
3075 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
3076 pVCpu->cpum.GstCtx.ldtr.Sel));
3077 }
3078 if (fIsNewTss386)
3079 {
3080 /*
3081         * Verify that the current TSS (32-bit) can be accessed; map only the minimum required size.
3082 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3083 */
3084 uint8_t bUnmapInfoCurTss32;
3085 void *pvCurTss32;
3086 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
3087 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
3088 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3089 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
3090 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
3091 if (rcStrict != VINF_SUCCESS)
3092 {
3093 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
3094 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
3095 return rcStrict;
3096 }
3097
3098        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
3099 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
3100 pCurTss32->eip = uNextEip;
3101 pCurTss32->eflags = fEFlags;
3102 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
3103 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
3104 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
3105 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
3106 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
3107 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
3108 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
3109 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
3110 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
3111 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
3112 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
3113 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
3114 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
3115 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
3116
3117 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
3118 if (rcStrict != VINF_SUCCESS)
3119 {
3120 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3121 VBOXSTRICTRC_VAL(rcStrict)));
3122 return rcStrict;
3123 }
3124 }
3125 else
3126 {
3127 /*
3128 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3129 */
3130 uint8_t bUnmapInfoCurTss16;
3131 void *pvCurTss16;
3132 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
3133 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
3134 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3135 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
3136 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
3137 if (rcStrict != VINF_SUCCESS)
3138 {
3139 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
3140 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
3141 return rcStrict;
3142 }
3143
3144        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
3145 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
3146 pCurTss16->ip = uNextEip;
3147 pCurTss16->flags = (uint16_t)fEFlags;
3148 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
3149 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
3150 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
3151 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
3152 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
3153 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
3154 pCurTss16->si = pVCpu->cpum.GstCtx.si;
3155 pCurTss16->di = pVCpu->cpum.GstCtx.di;
3156 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
3157 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
3158 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
3159 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
3160
3161 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
3162 if (rcStrict != VINF_SUCCESS)
3163 {
3164 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3165 VBOXSTRICTRC_VAL(rcStrict)));
3166 return rcStrict;
3167 }
3168 }
3169
3170 /*
3171 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3172 */
3173 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3174 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3175 {
3176        /* Whether it's a 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
3177 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
3178 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
3179 }
3180
3181 /*
3182     * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
3183 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3184 */
3185 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3186 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3187 bool fNewDebugTrap;
3188 if (fIsNewTss386)
3189 {
3190 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
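        /* CR3 is only taken from the new TSS when paging is enabled; it is likewise only applied further below when CR0.PG=1. */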
3191 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
3192 uNewEip = pNewTss32->eip;
3193 uNewEflags = pNewTss32->eflags;
3194 uNewEax = pNewTss32->eax;
3195 uNewEcx = pNewTss32->ecx;
3196 uNewEdx = pNewTss32->edx;
3197 uNewEbx = pNewTss32->ebx;
3198 uNewEsp = pNewTss32->esp;
3199 uNewEbp = pNewTss32->ebp;
3200 uNewEsi = pNewTss32->esi;
3201 uNewEdi = pNewTss32->edi;
3202 uNewES = pNewTss32->es;
3203 uNewCS = pNewTss32->cs;
3204 uNewSS = pNewTss32->ss;
3205 uNewDS = pNewTss32->ds;
3206 uNewFS = pNewTss32->fs;
3207 uNewGS = pNewTss32->gs;
3208 uNewLdt = pNewTss32->selLdt;
3209 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
3210 }
3211 else
3212 {
3213 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
3214 uNewCr3 = 0;
3215 uNewEip = pNewTss16->ip;
3216 uNewEflags = pNewTss16->flags;
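        /* A 16-bit TSS only supplies the low 16 bits of each GPR; the high halves are filled with 0xffff here. */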
3217 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
3218 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
3219 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
3220 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
3221 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
3222 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
3223 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
3224 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
3225 uNewES = pNewTss16->es;
3226 uNewCS = pNewTss16->cs;
3227 uNewSS = pNewTss16->ss;
3228 uNewDS = pNewTss16->ds;
3229 uNewFS = 0;
3230 uNewGS = 0;
3231 uNewLdt = pNewTss16->selLdt;
3232 fNewDebugTrap = false;
3233 }
3234
3235 if (GCPtrNewTss == GCPtrCurTss)
3236 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3237 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3238
3239 /*
3240 * We're done accessing the new TSS.
3241 */
3242 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
3243 if (rcStrict != VINF_SUCCESS)
3244 {
3245 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3246 return rcStrict;
3247 }
3248
3249 /*
3250 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3251 */
3252 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3253 {
3254 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
3255 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
3256 if (rcStrict != VINF_SUCCESS)
3257 {
3258 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3259 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3260 return rcStrict;
3261 }
3262
3263 /* Check that the descriptor indicates the new TSS is available (not busy). */
3264 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3265 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3266 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
3267
3268 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3269 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
3270 if (rcStrict != VINF_SUCCESS)
3271 {
3272 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3273 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3274 return rcStrict;
3275 }
3276 }
3277
3278 /*
3279     * From this point on, we're technically in the new task. Exceptions raised from here on are
3280     * delivered after the task switch completes but before any instruction in the new task executes.
3281 */
3282 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
3283 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
3284 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3285 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
3286 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
3287 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
3288 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3289
3290 /* Set the busy bit in TR. */
3291 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3292
3293 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3294 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3295 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3296 {
3297 uNewEflags |= X86_EFL_NT;
3298 }
3299
3300 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3301 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
3302 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3303
3304 pVCpu->cpum.GstCtx.eip = uNewEip;
3305 pVCpu->cpum.GstCtx.eax = uNewEax;
3306 pVCpu->cpum.GstCtx.ecx = uNewEcx;
3307 pVCpu->cpum.GstCtx.edx = uNewEdx;
3308 pVCpu->cpum.GstCtx.ebx = uNewEbx;
3309 pVCpu->cpum.GstCtx.esp = uNewEsp;
3310 pVCpu->cpum.GstCtx.ebp = uNewEbp;
3311 pVCpu->cpum.GstCtx.esi = uNewEsi;
3312 pVCpu->cpum.GstCtx.edi = uNewEdi;
3313
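    /* Sanitize the EFLAGS image loaded from the new TSS: keep only the live bits and force the reserved always-one bit (bit 1). */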
3314 uNewEflags &= X86_EFL_LIVE_MASK;
3315 uNewEflags |= X86_EFL_RA1_MASK;
3316 IEMMISC_SET_EFL(pVCpu, uNewEflags);
3317
3318 /*
3319 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3320 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3321     * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
3322 */
3323 pVCpu->cpum.GstCtx.es.Sel = uNewES;
3324 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
3325
3326 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3327 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
3328
3329 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3330 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
3331
3332 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
3333 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
3334
3335 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
3336 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
3337
3338 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
3339 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
3340 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3341
3342 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
3343 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3344 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
3345 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3346
3347 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3348 {
3349 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
3350 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
3351 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
3352 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
3353 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
3354 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
3355 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3356 }
3357
3358 /*
3359 * Switch CR3 for the new task.
3360 */
3361 if ( fIsNewTss386
3362 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
3363 {
3364 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3365 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3366 AssertRCSuccessReturn(rc, rc);
3367
3368 /* Inform PGM. */
3369 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
3370 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
3371 AssertRCReturn(rc, rc);
3372 /* ignore informational status codes */
3373
3374 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3375 }
3376
3377 /*
3378 * Switch LDTR for the new task.
3379 */
3380 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3381 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
3382 else
3383 {
3384 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3385
3386 IEMSELDESC DescNewLdt;
3387 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3388 if (rcStrict != VINF_SUCCESS)
3389 {
3390 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3391 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3392 return rcStrict;
3393 }
3394 if ( !DescNewLdt.Legacy.Gen.u1Present
3395 || DescNewLdt.Legacy.Gen.u1DescType
3396 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3397 {
3398 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3399 uNewLdt, DescNewLdt.Legacy.u));
3400 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3401 }
3402
3403 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
3404 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3405 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3406 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3407 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3408 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3409 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3410 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
3411 }
3412
3413 IEMSELDESC DescSS;
3414 if (IEM_IS_V86_MODE(pVCpu))
3415 {
3416 IEM_SET_CPL(pVCpu, 3);
3417 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
3418 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
3419 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
3420 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
3421 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
3422 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
3423
3424 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
3425 DescSS.Legacy.u = 0;
3426 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
3427 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
3428 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
3429 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
3430 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
3431 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3432 DescSS.Legacy.Gen.u2Dpl = 3;
3433 }
3434 else
3435 {
3436 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
3437
3438 /*
3439 * Load the stack segment for the new task.
3440 */
3441 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3442 {
3443 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3444 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3445 }
3446
3447 /* Fetch the descriptor. */
3448 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3449 if (rcStrict != VINF_SUCCESS)
3450 {
3451 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3452 VBOXSTRICTRC_VAL(rcStrict)));
3453 return rcStrict;
3454 }
3455
3456 /* SS must be a data segment and writable. */
3457 if ( !DescSS.Legacy.Gen.u1DescType
3458 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3459 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3460 {
3461 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3462 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3463 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3464 }
3465
3466 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3467 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3468 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3469 {
3470 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3471 uNewCpl));
3472 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3473 }
3474
3475 /* Is it there? */
3476 if (!DescSS.Legacy.Gen.u1Present)
3477 {
3478 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3479 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3480 }
3481
3482 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3483 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3484
3485 /* Set the accessed bit before committing the result into SS. */
3486 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3487 {
3488 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3489 if (rcStrict != VINF_SUCCESS)
3490 return rcStrict;
3491 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3492 }
3493
3494 /* Commit SS. */
3495 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3496 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3497 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3498 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
3499 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
3500 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3501 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
3502
3503 /* CPL has changed, update IEM before loading rest of segments. */
3504 IEM_SET_CPL(pVCpu, uNewCpl);
3505
3506 /*
3507 * Load the data segments for the new task.
3508 */
3509 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
3510 if (rcStrict != VINF_SUCCESS)
3511 return rcStrict;
3512 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
3513 if (rcStrict != VINF_SUCCESS)
3514 return rcStrict;
3515 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
3516 if (rcStrict != VINF_SUCCESS)
3517 return rcStrict;
3518 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
3519 if (rcStrict != VINF_SUCCESS)
3520 return rcStrict;
3521
3522 /*
3523 * Load the code segment for the new task.
3524 */
3525 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3526 {
3527 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3528 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3529 }
3530
3531 /* Fetch the descriptor. */
3532 IEMSELDESC DescCS;
3533 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
3534 if (rcStrict != VINF_SUCCESS)
3535 {
3536 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3537 return rcStrict;
3538 }
3539
3540 /* CS must be a code segment. */
3541 if ( !DescCS.Legacy.Gen.u1DescType
3542 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3543 {
3544 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3545 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3546 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3547 }
3548
3549 /* For conforming CS, DPL must be less than or equal to the RPL. */
3550 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3551 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3552 {
3553            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3554 DescCS.Legacy.Gen.u2Dpl));
3555 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3556 }
3557
3558 /* For non-conforming CS, DPL must match RPL. */
3559 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3560 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3561 {
3562            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3563 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3564 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3565 }
3566
3567 /* Is it there? */
3568 if (!DescCS.Legacy.Gen.u1Present)
3569 {
3570 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3571 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3572 }
3573
3574 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3575 u64Base = X86DESC_BASE(&DescCS.Legacy);
3576
3577 /* Set the accessed bit before committing the result into CS. */
3578 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3579 {
3580 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3581 if (rcStrict != VINF_SUCCESS)
3582 return rcStrict;
3583 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3584 }
3585
3586 /* Commit CS. */
3587 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3588 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3589 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3590 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3591 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3592 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3593 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3594 }
3595
3596 /* Make sure the CPU mode is correct. */
3597 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3598 if (fExecNew != pVCpu->iem.s.fExec)
3599 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3600 pVCpu->iem.s.fExec = fExecNew;
3601
3602 /** @todo Debug trap. */
3603 if (fIsNewTss386 && fNewDebugTrap)
3604 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3605
3606 /*
3607 * Construct the error code masks based on what caused this task switch.
3608 * See Intel Instruction reference for INT.
3609 */
3610 uint16_t uExt;
3611 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3612 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3613 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3614 uExt = 1;
3615 else
3616 uExt = 0;
3617
3618 /*
3619 * Push any error code on to the new stack.
3620 */
3621 if (fFlags & IEM_XCPT_FLAGS_ERR)
3622 {
3623 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3624 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3625 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3626
3627 /* Check that there is sufficient space on the stack. */
3628 /** @todo Factor out segment limit checking for normal/expand down segments
3629 * into a separate function. */
3630 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3631 {
3632 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3633 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3634 {
3635 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3636 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3637 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3638 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3639 }
3640 }
3641 else
3642 {
3643 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3644 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3645 {
3646 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3647 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3648 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3649 }
3650 }
3651
3652
3653 if (fIsNewTss386)
3654 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3655 else
3656 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3657 if (rcStrict != VINF_SUCCESS)
3658 {
3659 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3660 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3661 return rcStrict;
3662 }
3663 }
3664
3665 /* Check the new EIP against the new CS limit. */
3666 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3667 {
3668        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3669 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3670 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3671 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3672 }
3673
3674 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3675 pVCpu->cpum.GstCtx.ss.Sel));
3676 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3677}
3678
3679
3680/**
3681 * Implements exceptions and interrupts for protected mode.
3682 *
3683 * @returns VBox strict status code.
3684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3685 * @param cbInstr The number of bytes to offset rIP by in the return
3686 * address.
3687 * @param u8Vector The interrupt / exception vector number.
3688 * @param fFlags The flags.
3689 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3690 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3691 */
3692static VBOXSTRICTRC
3693iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3694 uint8_t cbInstr,
3695 uint8_t u8Vector,
3696 uint32_t fFlags,
3697 uint16_t uErr,
3698 uint64_t uCr2) RT_NOEXCEPT
3699{
3700 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3701
3702 /*
3703 * Read the IDT entry.
3704 */
3705 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3706 {
3707 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3708 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3709 }
3710 X86DESC Idte;
3711 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3712 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3713 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3714 {
3715 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3716 return rcStrict;
3717 }
3718 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3719 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3720 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3721 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3722
3723 /*
3724 * Check the descriptor type, DPL and such.
3725 * ASSUMES this is done in the same order as described for call-gate calls.
3726 */
3727 if (Idte.Gate.u1DescType)
3728 {
3729 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3730 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3731 }
3732 bool fTaskGate = false;
3733 uint8_t f32BitGate = true;
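    /* Note: f32BitGate doubles as a shift count (0 or 1) below, doubling the stack-frame entry size for 32-bit gates. */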
3734 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3735 switch (Idte.Gate.u4Type)
3736 {
3737 case X86_SEL_TYPE_SYS_UNDEFINED:
3738 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3739 case X86_SEL_TYPE_SYS_LDT:
3740 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3741 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3742 case X86_SEL_TYPE_SYS_UNDEFINED2:
3743 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3744 case X86_SEL_TYPE_SYS_UNDEFINED3:
3745 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3746 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3747 case X86_SEL_TYPE_SYS_UNDEFINED4:
3748 {
3749 /** @todo check what actually happens when the type is wrong...
3750 * esp. call gates. */
3751 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3752 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3753 }
3754
3755 case X86_SEL_TYPE_SYS_286_INT_GATE:
3756 f32BitGate = false;
3757 RT_FALL_THRU();
3758 case X86_SEL_TYPE_SYS_386_INT_GATE:
3759 fEflToClear |= X86_EFL_IF;
3760 break;
3761
3762 case X86_SEL_TYPE_SYS_TASK_GATE:
3763 fTaskGate = true;
3764#ifndef IEM_IMPLEMENTS_TASKSWITCH
3765 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3766#endif
3767 break;
3768
3769 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3770 f32BitGate = false;
3771 break;
3772 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3773 break;
3774
3775 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3776 }
3777
3778 /* Check DPL against CPL if applicable. */
3779 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3780 {
3781 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3782 {
3783 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3784 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3785 }
3786 }
3787
3788 /* Is it there? */
3789 if (!Idte.Gate.u1Present)
3790 {
3791 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3792 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3793 }
3794
3795 /* Is it a task-gate? */
3796 if (fTaskGate)
3797 {
3798 /*
3799 * Construct the error code masks based on what caused this task switch.
3800 * See Intel Instruction reference for INT.
3801 */
3802 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3803 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3804 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3805 RTSEL SelTss = Idte.Gate.u16Sel;
3806
3807 /*
3808 * Fetch the TSS descriptor in the GDT.
3809 */
3810 IEMSELDESC DescTSS;
3811 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3812 if (rcStrict != VINF_SUCCESS)
3813 {
3814 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3815 VBOXSTRICTRC_VAL(rcStrict)));
3816 return rcStrict;
3817 }
3818
3819 /* The TSS descriptor must be a system segment and be available (not busy). */
3820 if ( DescTSS.Legacy.Gen.u1DescType
3821 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3822 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3823 {
3824 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3825 u8Vector, SelTss, DescTSS.Legacy.au64));
3826 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3827 }
3828
3829 /* The TSS must be present. */
3830 if (!DescTSS.Legacy.Gen.u1Present)
3831 {
3832 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3833 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3834 }
3835
3836 /* Do the actual task switch. */
3837 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3838 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3839 fFlags, uErr, uCr2, SelTss, &DescTSS);
3840 }
3841
3842 /* A null CS is bad. */
3843 RTSEL NewCS = Idte.Gate.u16Sel;
3844 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3845 {
3846 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3847 return iemRaiseGeneralProtectionFault0(pVCpu);
3848 }
3849
3850 /* Fetch the descriptor for the new CS. */
3851 IEMSELDESC DescCS;
3852 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3853 if (rcStrict != VINF_SUCCESS)
3854 {
3855 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3856 return rcStrict;
3857 }
3858
3859 /* Must be a code segment. */
3860 if (!DescCS.Legacy.Gen.u1DescType)
3861 {
3862 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3863 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3864 }
3865 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3866 {
3867 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3868 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3869 }
3870
3871 /* Don't allow lowering the privilege level. */
3872 /** @todo Does the lowering of privileges apply to software interrupts
3873     * only? This has a bearing on the more-privileged or
3874 * same-privilege stack behavior further down. A testcase would
3875 * be nice. */
3876 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3877 {
3878 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3879 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3880 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3881 }
3882
3883 /* Make sure the selector is present. */
3884 if (!DescCS.Legacy.Gen.u1Present)
3885 {
3886 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3887 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3888 }
3889
3890#ifdef LOG_ENABLED
3891    /* If software interrupt, try to decode it if logging is enabled and such. */
3892 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3893 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3894 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3895#endif
3896
3897 /* Check the new EIP against the new CS limit. */
3898 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3899 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3900 ? Idte.Gate.u16OffsetLow
3901 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3902 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3903 if (uNewEip > cbLimitCS)
3904 {
3905 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3906 u8Vector, uNewEip, cbLimitCS, NewCS));
3907 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3908 }
3909 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3910
3911 /* Calc the flag image to push. */
3912 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3913 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3914 fEfl &= ~X86_EFL_RF;
3915 else
3916 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3917
3918 /* From V8086 mode only go to CPL 0. */
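    /* Conforming code segments keep the current CPL; non-conforming handlers run at the CS descriptor's DPL. */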
3919 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3920 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3921 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3922 {
3923 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3924 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3925 }
3926
3927 /*
3928 * If the privilege level changes, we need to get a new stack from the TSS.
3929 * This in turns means validating the new SS and ESP...
3930 */
3931 if (uNewCpl != IEM_GET_CPL(pVCpu))
3932 {
3933 RTSEL NewSS;
3934 uint32_t uNewEsp;
3935 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3936 if (rcStrict != VINF_SUCCESS)
3937 return rcStrict;
3938
3939 IEMSELDESC DescSS;
3940 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3941 if (rcStrict != VINF_SUCCESS)
3942 return rcStrict;
3943 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3944 if (!DescSS.Legacy.Gen.u1DefBig)
3945 {
3946 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3947 uNewEsp = (uint16_t)uNewEsp;
3948 }
3949
3950 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3951
3952 /* Check that there is sufficient space for the stack frame. */
3953 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
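        /* Stack frame: EIP, CS, EFLAGS, old ESP and old SS (5 entries, 6 with an error code); V86 mode adds the
           old ES, DS, FS and GS (9/10 entries). Entries are 2 bytes for a 16-bit gate and 4 bytes for a 32-bit gate. */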
3954 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3955 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3956 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3957
3958 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3959 {
3960 if ( uNewEsp - 1 > cbLimitSS
3961 || uNewEsp < cbStackFrame)
3962 {
3963 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3964 u8Vector, NewSS, uNewEsp, cbStackFrame));
3965 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3966 }
3967 }
3968 else
3969 {
3970 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3971 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3972 {
3973 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3974 u8Vector, NewSS, uNewEsp, cbStackFrame));
3975 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3976 }
3977 }
3978
3979 /*
3980 * Start making changes.
3981 */
3982
3983 /* Set the new CPL so that stack accesses use it. */
3984 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3985 IEM_SET_CPL(pVCpu, uNewCpl);
3986
3987 /* Create the stack frame. */
3988 uint8_t bUnmapInfoStackFrame;
3989 RTPTRUNION uStackFrame;
3990 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3991 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3992 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3993 if (rcStrict != VINF_SUCCESS)
3994 return rcStrict;
3995 if (f32BitGate)
3996 {
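            /* Frame layout, low to high address: [error code,] EIP, CS, EFLAGS, old ESP, old SS; in V86 mode the old ES, DS, FS and GS follow. */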
3997 if (fFlags & IEM_XCPT_FLAGS_ERR)
3998 *uStackFrame.pu32++ = uErr;
3999 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4000 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4001 uStackFrame.pu32[2] = fEfl;
4002 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4003 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4004 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4005 if (fEfl & X86_EFL_VM)
4006 {
4007 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4008 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4009 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4010 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4011 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4012 }
4013 }
4014 else
4015 {
4016 if (fFlags & IEM_XCPT_FLAGS_ERR)
4017 *uStackFrame.pu16++ = uErr;
4018 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4019 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4020 uStackFrame.pu16[2] = fEfl;
4021 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
4022 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
4023 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
4024 if (fEfl & X86_EFL_VM)
4025 {
4026 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
4027 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
4028 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
4029 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
4030 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
4031 }
4032 }
4033 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4034 if (rcStrict != VINF_SUCCESS)
4035 return rcStrict;
4036
4037 /* Mark the selectors 'accessed' (hope this is the correct time). */
4038        /** @todo testcase: exactly _when_ are the accessed bits set - before or
4039 * after pushing the stack frame? (Write protect the gdt + stack to
4040 * find out.) */
4041 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4042 {
4043 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4044 if (rcStrict != VINF_SUCCESS)
4045 return rcStrict;
4046 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4047 }
4048
4049 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4050 {
4051 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4052 if (rcStrict != VINF_SUCCESS)
4053 return rcStrict;
4054 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4055 }
4056
4057 /*
4058         * Start committing the register changes (joins with the DPL=CPL branch).
4059 */
4060 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
4061 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
4062 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4063 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
4064 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4065 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4066 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4067 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4068 * SP is loaded).
4069 * Need to check the other combinations too:
4070 * - 16-bit TSS, 32-bit handler
4071 * - 32-bit TSS, 16-bit handler */
4072 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
4073 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
4074 else
4075 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
4076
4077 if (fEfl & X86_EFL_VM)
4078 {
4079 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
4080 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
4081 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
4082 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
4083 }
4084 }
4085 /*
4086 * Same privilege, no stack change and smaller stack frame.
4087 */
4088 else
4089 {
4090 uint64_t uNewRsp;
4091 uint8_t bUnmapInfoStackFrame;
4092 RTPTRUNION uStackFrame;
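        /* EIP, CS and EFLAGS (plus the error code when present); 2-byte entries for a 16-bit gate, 4-byte for a 32-bit gate. */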
4093 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4094 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
4095 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
4096 if (rcStrict != VINF_SUCCESS)
4097 return rcStrict;
4098
4099 if (f32BitGate)
4100 {
4101 if (fFlags & IEM_XCPT_FLAGS_ERR)
4102 *uStackFrame.pu32++ = uErr;
4103 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4104 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
4105 uStackFrame.pu32[2] = fEfl;
4106 }
4107 else
4108 {
4109 if (fFlags & IEM_XCPT_FLAGS_ERR)
4110 *uStackFrame.pu16++ = uErr;
4111 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4112 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
4113 uStackFrame.pu16[2] = fEfl;
4114 }
4115 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
4116 if (rcStrict != VINF_SUCCESS)
4117 return rcStrict;
4118
4119 /* Mark the CS selector as 'accessed'. */
4120 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4121 {
4122 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4123 if (rcStrict != VINF_SUCCESS)
4124 return rcStrict;
4125 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4126 }
4127
4128 /*
4129 * Start committing the register changes (joins with the other branch).
4130 */
4131 pVCpu->cpum.GstCtx.rsp = uNewRsp;
4132 }
4133
4134 /* ... register committing continues. */
4135 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4136 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4137 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4138 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
4139 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4140 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4141
4142 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4143 fEfl &= ~fEflToClear;
4144 IEMMISC_SET_EFL(pVCpu, fEfl);
4145
4146 if (fFlags & IEM_XCPT_FLAGS_CR2)
4147 pVCpu->cpum.GstCtx.cr2 = uCr2;
4148
4149 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4150 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4151
4152 /* Make sure the execution flags are correct. */
4153 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
4154 if (fExecNew != pVCpu->iem.s.fExec)
4155 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
4156 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
4157 pVCpu->iem.s.fExec = fExecNew;
4158 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
4159
4160 /*
4161     * Deal with debug events that follow the exception and clear inhibit flags.
4162 */
4163 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4164 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4165 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4166 else
4167 {
4168 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
4169 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4170 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4171 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4172 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4173 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4174 return iemRaiseDebugException(pVCpu);
4175 }
4176
4177 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4178}
4179
4180
4181/**
4182 * Implements exceptions and interrupts for long mode.
4183 *
4184 * @returns VBox strict status code.
4185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4186 * @param cbInstr The number of bytes to offset rIP by in the return
4187 * address.
4188 * @param u8Vector The interrupt / exception vector number.
4189 * @param fFlags The flags.
4190 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4191 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4192 */
4193static VBOXSTRICTRC
4194iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
4195 uint8_t cbInstr,
4196 uint8_t u8Vector,
4197 uint32_t fFlags,
4198 uint16_t uErr,
4199 uint64_t uCr2) RT_NOEXCEPT
4200{
4201 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4202
4203 /*
4204 * Read the IDT entry.
4205 */
4206 uint16_t offIdt = (uint16_t)u8Vector << 4;
4207 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
4208 {
4209 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4210 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4211 }
4212 X86DESC64 Idte;
4213#ifdef _MSC_VER /* Shut up silly compiler warning. */
4214 Idte.au64[0] = 0;
4215 Idte.au64[1] = 0;
4216#endif
4217 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
4218 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4219 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
4220 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4221 {
4222 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4223 return rcStrict;
4224 }
4225 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4226 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4227 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4228
4229 /*
4230 * Check the descriptor type, DPL and such.
4231 * ASSUMES this is done in the same order as described for call-gate calls.
4232 */
4233 if (Idte.Gate.u1DescType)
4234 {
4235 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4236 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4237 }
4238 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4239 switch (Idte.Gate.u4Type)
4240 {
4241 case AMD64_SEL_TYPE_SYS_INT_GATE:
4242 fEflToClear |= X86_EFL_IF;
4243 break;
4244 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4245 break;
4246
4247 default:
4248 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4249 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4250 }
4251
4252 /* Check DPL against CPL if applicable. */
4253 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4254 {
4255 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
4256 {
4257 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
4258 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4259 }
4260 }
4261
4262 /* Is it there? */
4263 if (!Idte.Gate.u1Present)
4264 {
4265 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4266 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4267 }
4268
4269 /* A null CS is bad. */
4270 RTSEL NewCS = Idte.Gate.u16Sel;
4271 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4272 {
4273 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4274 return iemRaiseGeneralProtectionFault0(pVCpu);
4275 }
4276
4277 /* Fetch the descriptor for the new CS. */
4278 IEMSELDESC DescCS;
4279 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4280 if (rcStrict != VINF_SUCCESS)
4281 {
4282 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4283 return rcStrict;
4284 }
4285
4286 /* Must be a 64-bit code segment. */
4287 if (!DescCS.Long.Gen.u1DescType)
4288 {
4289 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4290 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4291 }
4292 if ( !DescCS.Long.Gen.u1Long
4293 || DescCS.Long.Gen.u1DefBig
4294 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4295 {
4296 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4297 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4298 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4299 }
4300
4301 /* Don't allow lowering the privilege level. For non-conforming CS
4302 selectors, the CS.DPL sets the privilege level the trap/interrupt
4303 handler runs at. For conforming CS selectors, the CPL remains
4304 unchanged, but the CS.DPL must be <= CPL. */
4305 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4306 * when CPU in Ring-0. Result \#GP? */
4307 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
4308 {
4309 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4310 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
4311 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4312 }
4313
4314
4315 /* Make sure the selector is present. */
4316 if (!DescCS.Legacy.Gen.u1Present)
4317 {
4318 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4319 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4320 }
4321
4322 /* Check that the new RIP is canonical. */
4323 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4324 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4325 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4326 if (!IEM_IS_CANONICAL(uNewRip))
4327 {
4328 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4329 return iemRaiseGeneralProtectionFault0(pVCpu);
4330 }
4331
4332 /*
4333 * If the privilege level changes or if the IST isn't zero, we need to get
4334 * a new stack from the TSS.
4335 */
4336 uint64_t uNewRsp;
4337 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4338 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
4339 if ( uNewCpl != IEM_GET_CPL(pVCpu)
4340 || Idte.Gate.u3IST != 0)
4341 {
4342 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4343 if (rcStrict != VINF_SUCCESS)
4344 return rcStrict;
4345 }
4346 else
4347 uNewRsp = pVCpu->cpum.GstCtx.rsp;
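    /* In long mode the CPU aligns the stack pointer on a 16-byte boundary before pushing the interrupt stack frame. */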
4348 uNewRsp &= ~(uint64_t)0xf;
4349
4350 /*
4351 * Calc the flag image to push.
4352 */
4353 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4354 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4355 fEfl &= ~X86_EFL_RF;
4356 else
4357 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4358
4359 /*
4360 * Start making changes.
4361 */
4362 /* Set the new CPL so that stack accesses use it. */
4363 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
4364 IEM_SET_CPL(pVCpu, uNewCpl);
4365/** @todo Setting CPL this early seems wrong as it would affect any errors we
4366 * raise accessing the stack and (?) GDT/LDT... */
4367
4368 /* Create the stack frame. */
4369 uint8_t bUnmapInfoStackFrame;
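    /* Five 8-byte slots (RIP, CS, RFLAGS, old RSP and old SS), plus one more when an error code is pushed. */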
4370 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4371 RTPTRUNION uStackFrame;
4372 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
4373 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
4374 if (rcStrict != VINF_SUCCESS)
4375 return rcStrict;
4376
4377 if (fFlags & IEM_XCPT_FLAGS_ERR)
4378 *uStackFrame.pu64++ = uErr;
4379 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
4380 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4381 uStackFrame.pu64[2] = fEfl;
4382 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
4383 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
4384 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4385 if (rcStrict != VINF_SUCCESS)
4386 return rcStrict;
4387
4388    /* Mark the CS selector 'accessed' (hope this is the correct time). */
4389    /** @todo testcase: exactly _when_ are the accessed bits set - before or
4390 * after pushing the stack frame? (Write protect the gdt + stack to
4391 * find out.) */
4392 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4393 {
4394 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4395 if (rcStrict != VINF_SUCCESS)
4396 return rcStrict;
4397 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4398 }
4399
4400 /*
4401 * Start committing the register changes.
4402 */
4403 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
4404 * hidden registers when interrupting 32-bit or 16-bit code! */
4405 if (uNewCpl != uOldCpl)
4406 {
4407 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
4408 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
4409 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4410 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4411 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4412 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4413 }
4414 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
4415 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4416 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4417 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4418 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4419 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4420 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4421 pVCpu->cpum.GstCtx.rip = uNewRip;
4422
4423 fEfl &= ~fEflToClear;
4424 IEMMISC_SET_EFL(pVCpu, fEfl);
4425
4426 if (fFlags & IEM_XCPT_FLAGS_CR2)
4427 pVCpu->cpum.GstCtx.cr2 = uCr2;
4428
4429 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4430 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4431
4432 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
4433
4434 /*
4435 * Deal with debug events that follow the exception and clear inhibit flags.
4436 */
4437 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4438 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4439 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4440 else
4441 {
4442 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
4443 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4444 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4445 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4446 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4447 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4448 return iemRaiseDebugException(pVCpu);
4449 }
4450
4451 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4452}
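
/* Illustrative note on the 64-bit stack frame built above (example scenario,
 * details assumed): for a #GP with an error code raised from CPL 3 code and
 * delivered through a DPL 0 64-bit interrupt gate with IST=0, the mapped
 * frame consists of six qwords below the 16-byte aligned uNewRsp:
 *
 *      uNewRsp - 0x08: SS of the interrupted code
 *      uNewRsp - 0x10: RSP of the interrupted code
 *      uNewRsp - 0x18: RFLAGS image (fEfl)
 *      uNewRsp - 0x20: CS of the interrupted code (RPL = old CPL)
 *      uNewRsp - 0x28: return RIP (RIP + cbInstr for software interrupts)
 *      uNewRsp - 0x30: error code              <- guest RSP after delivery
 */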
4453
4454
4455/**
4456 * Implements exceptions and interrupts.
4457 *
4458 * All exceptions and interrupts go through this function!
4459 *
4460 * @returns VBox strict status code.
4461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4462 * @param cbInstr The number of bytes to offset rIP by in the return
4463 * address.
4464 * @param u8Vector The interrupt / exception vector number.
4465 * @param fFlags The flags.
4466 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4467 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4468 */
4469VBOXSTRICTRC
4470iemRaiseXcptOrInt(PVMCPUCC pVCpu,
4471 uint8_t cbInstr,
4472 uint8_t u8Vector,
4473 uint32_t fFlags,
4474 uint16_t uErr,
4475 uint64_t uCr2) RT_NOEXCEPT
4476{
4477 /*
4478 * Get all the state that we might need here.
4479 */
4480 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4481 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4482
4483#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4484 /*
4485 * Flush prefetch buffer
4486 */
4487 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4488#endif
4489
4490 /*
4491 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4492 */
4493 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
4494 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
4495 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
4496 | IEM_XCPT_FLAGS_BP_INSTR
4497 | IEM_XCPT_FLAGS_ICEBP_INSTR
4498 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4499 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
4500 {
4501 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4502 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4503 u8Vector = X86_XCPT_GP;
4504 uErr = 0;
4505 }
4506
4507 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
4508#ifdef DBGFTRACE_ENABLED
4509 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4510 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4511 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
4512#endif
4513
4514 /*
4515 * Check if DBGF wants to intercept the exception.
4516 */
4517 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
4518 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
4519 { /* likely */ }
4520 else
4521 {
4522 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
4523 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
4524 if (rcStrict != VINF_SUCCESS)
4525 return rcStrict;
4526 }
4527
4528 /*
4529 * Evaluate whether NMI blocking should be in effect.
4530 * Normally, NMI blocking is in effect whenever we inject an NMI.
4531 */
4532 bool fBlockNmi = u8Vector == X86_XCPT_NMI
4533 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
4534
4535#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4536 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4537 {
4538 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
4539 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4540 return rcStrict0;
4541
4542 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
4543 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
4544 {
4545 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
4546 fBlockNmi = false;
4547 }
4548 }
4549#endif
4550
4551#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4552 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
4553 {
4554 /*
4555 * If the event is being injected as part of VMRUN, it isn't subject to event
4556 * intercepts in the nested-guest. However, secondary exceptions that occur
4557 * during injection of any event -are- subject to exception intercepts.
4558 *
4559 * See AMD spec. 15.20 "Event Injection".
4560 */
4561 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
4562 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
4563 else
4564 {
4565 /*
4566 * Check and handle if the event being raised is intercepted.
4567 */
4568 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4569 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
4570 return rcStrict0;
4571 }
4572 }
4573#endif
4574
4575 /*
4576 * Set NMI blocking if necessary.
4577 */
4578 if (fBlockNmi)
4579 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
4580
4581 /*
4582 * Do recursion accounting.
4583 */
4584 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4585 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4586 if (pVCpu->iem.s.cXcptRecursions == 0)
4587 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4588 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
4589 else
4590 {
4591 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4592 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
4593 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4594
4595 if (pVCpu->iem.s.cXcptRecursions >= 4)
4596 {
4597#ifdef DEBUG_bird
4598 AssertFailed();
4599#endif
4600 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4601 }
4602
4603 /*
4604 * Evaluate the sequence of recurring events.
4605 */
4606 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4607 NULL /* pXcptRaiseInfo */);
4608 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4609 { /* likely */ }
4610 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4611 {
4612 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4613 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4614 u8Vector = X86_XCPT_DF;
4615 uErr = 0;
4616#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4617 /* VMX nested-guest #DF intercept needs to be checked here. */
4618 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4619 {
4620 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4621 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4622 return rcStrict0;
4623 }
4624#endif
4625 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4626 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4627 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4628 }
4629 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4630 {
4631 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4632 return iemInitiateCpuShutdown(pVCpu);
4633 }
4634 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4635 {
4636 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4637 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4638 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4639 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4640 return VERR_EM_GUEST_CPU_HANG;
4641 }
4642 else
4643 {
4644 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4645 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4646 return VERR_IEM_IPE_9;
4647 }
4648
4649 /*
4650 * The 'EXT' bit is set when an exception occurs during delivery of an external
4651 * event (such as an interrupt or an earlier exception)[1]. A privileged software
4652 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by the
4653 * software interrupt instructions (INT n, INTO, INT3), the 'EXT' bit will not be set[3].
4654 *
4655 * [1] - Intel spec. 6.13 "Error Code"
4656 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4657 * [3] - Intel Instruction reference for INT n.
4658 */
4659 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4660 && (fFlags & IEM_XCPT_FLAGS_ERR)
4661 && u8Vector != X86_XCPT_PF
4662 && u8Vector != X86_XCPT_DF)
4663 {
4664 uErr |= X86_TRAP_ERR_EXTERNAL;
4665 }
4666 }
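
    /* Example of the escalation handled above (illustrative scenario): a #GP
       raised while delivering a #PF evaluates to IEMXCPTRAISE_DOUBLE_FAULT, so
       delivery restarts as #DF with a zero error code; if yet another
       contributory exception is raised while delivering that #DF, the next
       evaluation returns IEMXCPTRAISE_TRIPLE_FAULT and the CPU is shut down. */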
4667
4668 pVCpu->iem.s.cXcptRecursions++;
4669 pVCpu->iem.s.uCurXcpt = u8Vector;
4670 pVCpu->iem.s.fCurXcpt = fFlags;
4671 pVCpu->iem.s.uCurXcptErr = uErr;
4672 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4673
4674 /*
4675 * Extensive logging.
4676 */
4677#if defined(LOG_ENABLED) && defined(IN_RING3)
4678 if (LogIs3Enabled())
4679 {
4680 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4681 char szRegs[4096];
4682 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4683 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4684 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4685 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4686 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4687 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4688 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4689 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4690 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4691 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4692 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4693 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4694 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4695 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4696 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4697 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4698 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4699 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4700 " efer=%016VR{efer}\n"
4701 " pat=%016VR{pat}\n"
4702 " sf_mask=%016VR{sf_mask}\n"
4703 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4704 " lstar=%016VR{lstar}\n"
4705 " star=%016VR{star} cstar=%016VR{cstar}\n"
4706 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4707 );
4708
4709 char szInstr[256];
4710 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4711 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4712 szInstr, sizeof(szInstr), NULL);
4713 Log3(("%s%s\n", szRegs, szInstr));
4714 }
4715#endif /* LOG_ENABLED */
4716
4717 /*
4718 * Stats.
4719 */
4720 uint64_t const uTimestamp = ASMReadTSC();
4721 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4722 {
4723 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4724 EMHistoryAddExit(pVCpu,
4725 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4726 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4727 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4728 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4729 IEMTLBTRACE_IRQ(pVCpu, u8Vector, fFlags, pVCpu->cpum.GstCtx.rflags.uBoth);
4730 }
4731 else
4732 {
4733 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4734 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4735 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4736 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4737 if (fFlags & IEM_XCPT_FLAGS_ERR)
4738 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4739 if (fFlags & IEM_XCPT_FLAGS_CR2)
4740 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4741 IEMTLBTRACE_XCPT(pVCpu, u8Vector, fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0, fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0, fFlags);
4742 }
4743
4744 /*
4745 * Hack alert! Convert incoming debug events to silent ones on Intel.
4746 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4747 */
4748 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4749 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4750 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4751 { /* ignore */ }
4752 else
4753 {
4754 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
4755 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
4756 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4757 | CPUMCTX_DBG_HIT_DRX_SILENT;
4758 }
4759
4760 /*
4761 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4762 * to ensure that a stale TLB or paging cache entry will only cause one
4763 * spurious #PF.
4764 */
4765 if ( u8Vector == X86_XCPT_PF
4766 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4767 IEMTlbInvalidatePage(pVCpu, uCr2);
4768
4769 /*
4770 * Call the mode specific worker function.
4771 */
4772 VBOXSTRICTRC rcStrict;
4773 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4774 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4775 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4776 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4777 else
4778 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4779
4780 /* Flush the prefetch buffer. */
4781 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4782
4783 /*
4784 * Unwind.
4785 */
4786 pVCpu->iem.s.cXcptRecursions--;
4787 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4788 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4789 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4790 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4791 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4792 return rcStrict;
4793}
4794
4795#ifdef IEM_WITH_SETJMP
4796/**
4797 * See iemRaiseXcptOrInt. Will not return.
4798 */
4799DECL_NO_RETURN(void)
4800iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4801 uint8_t cbInstr,
4802 uint8_t u8Vector,
4803 uint32_t fFlags,
4804 uint16_t uErr,
4805 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4806{
4807 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4808 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4809}
4810#endif
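
/* Illustrative usage note (hypothetical call sites, parameter names assumed):
 * an external hardware interrupt would typically be delivered as
 *      iemRaiseXcptOrInt(pVCpu, 0, u8Irq, IEM_XCPT_FLAGS_T_EXT_INT, 0, 0);
 * while an INT3 would pass IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR
 * together with the instruction length so that the pushed return RIP points at
 * the instruction following the INT3. The raisers below cover the CPU
 * exception cases.
 */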
4811
4812
4813/** \#DE - 00. */
4814VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4815{
4816 if (GCMIsInterceptingXcptDE(pVCpu))
4817 {
4818 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4819 if (rc == VINF_SUCCESS)
4820 {
4821 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4822 return VINF_IEM_RAISED_XCPT; /* must return a non-zero status here to cause an instruction restart */
4823 }
4824 }
4825 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4826}
4827
4828
4829#ifdef IEM_WITH_SETJMP
4830/** \#DE - 00. */
4831DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4832{
4833 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4834}
4835#endif
4836
4837
4838/** \#DB - 01.
4839 * @note This automatically clears DR7.GD. */
4840VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4841{
4842 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4843 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4844 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4845}
4846
4847
4848/** \#BR - 05. */
4849VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4850{
4851 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4852}
4853
4854
4855/** \#UD - 06. */
4856VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4857{
4858 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4859}
4860
4861
4862#ifdef IEM_WITH_SETJMP
4863/** \#UD - 06. */
4864DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4865{
4866 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4867}
4868#endif
4869
4870
4871/** \#NM - 07. */
4872VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4873{
4874 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4875}
4876
4877
4878#ifdef IEM_WITH_SETJMP
4879/** \#NM - 07. */
4880DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4881{
4882 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4883}
4884#endif
4885
4886
4887/** \#TS(err) - 0a. */
4888VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4889{
4890 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4891}
4892
4893
4894/** \#TS(tr) - 0a. */
4895VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4896{
4897 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4898 pVCpu->cpum.GstCtx.tr.Sel, 0);
4899}
4900
4901
4902/** \#TS(0) - 0a. */
4903VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4904{
4905 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4906 0, 0);
4907}
4908
4909
4910/** \#TS(sel) - 0a. */
4911VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4912{
4913 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4914 uSel & X86_SEL_MASK_OFF_RPL, 0);
4915}
4916
4917
4918/** \#NP(err) - 0b. */
4919VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4920{
4921 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4922}
4923
4924
4925/** \#NP(sel) - 0b. */
4926VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4927{
4928 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4929 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4930 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4931 uSel & ~X86_SEL_RPL, 0);
4932}
4933
4934
4935/** \#SS(seg) - 0c. */
4936VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4937{
4938 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4939 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4940 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4941 uSel & ~X86_SEL_RPL, 0);
4942}
4943
4944
4945/** \#SS(err) - 0c. */
4946VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4947{
4948 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4949 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4950 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4951}
4952
4953
4954/** \#GP(n) - 0d. */
4955VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4956{
4957 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4958 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4959}
4960
4961
4962/** \#GP(0) - 0d. */
4963VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4964{
4965 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4966 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4967}
4968
4969#ifdef IEM_WITH_SETJMP
4970/** \#GP(0) - 0d. */
4971DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4972{
4973 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4974 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4975}
4976#endif
4977
4978
4979/** \#GP(sel) - 0d. */
4980VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4981{
4982 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4983 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4984 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4985 Sel & ~X86_SEL_RPL, 0);
4986}
4987
4988
4989/** \#GP(0) - 0d. */
4990VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4991{
4992 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4993 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4994}
4995
4996
4997/** \#GP(sel) - 0d. */
4998VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4999{
5000 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
5001 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
5002 NOREF(iSegReg); NOREF(fAccess);
5003 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5004 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5005}
5006
5007#ifdef IEM_WITH_SETJMP
5008/** \#GP(sel) - 0d, longjmp. */
5009DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
5010{
5011 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
5012 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
5013 NOREF(iSegReg); NOREF(fAccess);
5014 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5015 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5016}
5017#endif
5018
5019/** \#GP(sel) - 0d. */
5020VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
5021{
5022 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
5023 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
5024 NOREF(Sel);
5025 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5026}
5027
5028#ifdef IEM_WITH_SETJMP
5029/** \#GP(sel) - 0d, longjmp. */
5030DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
5031{
5032 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
5033 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
5034 NOREF(Sel);
5035 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5036}
5037#endif
5038
5039
5040/** \#GP(sel) - 0d. */
5041VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
5042{
5043 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
5044 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
5045 NOREF(iSegReg); NOREF(fAccess);
5046 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5047}
5048
5049#ifdef IEM_WITH_SETJMP
5050/** \#GP(sel) - 0d, longjmp. */
5051DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
5052{
5053 NOREF(iSegReg); NOREF(fAccess);
5054 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5055}
5056#endif
5057
5058
5059/** \#PF(n) - 0e. */
5060VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
5061{
5062 uint16_t uErr;
5063 switch (rc)
5064 {
5065 case VERR_PAGE_NOT_PRESENT:
5066 case VERR_PAGE_TABLE_NOT_PRESENT:
5067 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5068 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5069 uErr = 0;
5070 break;
5071
5072 case VERR_RESERVED_PAGE_TABLE_BITS:
5073 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
5074 break;
5075
5076 default:
5077 AssertMsgFailed(("%Rrc\n", rc));
5078 RT_FALL_THRU();
5079 case VERR_ACCESS_DENIED:
5080 uErr = X86_TRAP_PF_P;
5081 break;
5082 }
5083
5084 if (IEM_GET_CPL(pVCpu) == 3)
5085 uErr |= X86_TRAP_PF_US;
5086
5087 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5088 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5089 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5090 uErr |= X86_TRAP_PF_ID;
5091
5092#if 0 /* This is so much non-sense, really. Why was it done like that? */
5093 /* Note! RW access callers reporting a WRITE protection fault, will clear
5094 the READ flag before calling. So, read-modify-write accesses (RW)
5095 can safely be reported as READ faults. */
5096 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5097 uErr |= X86_TRAP_PF_RW;
5098#else
5099 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5100 {
5101 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
5102 /// (regardless of outcome of the comparison in the latter case).
5103 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
5104 uErr |= X86_TRAP_PF_RW;
5105 }
5106#endif
5107
5108 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
5109 of the memory operand rather than at the start of it. (Not sure what
5110 happens if it crosses a page boundary.) The current heuristic for
5111 this is to report the #PF for the last byte if the access is more than
5112 64 bytes. This is probably not correct, but we can work that out later;
5113 the main objective now is to get FXSAVE to work like on real hardware and
5114 make bs3-cpu-basic2 work. */
5115 if (cbAccess <= 64)
5116 { /* likely */ }
5117 else
5118 GCPtrWhere += cbAccess - 1;
5119
5120 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5121 uErr, GCPtrWhere);
5122}
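
/* Illustrative examples of the error code composed above (values assumed): a
 * CPL 3 write to a present read-only page (rc = VERR_ACCESS_DENIED with
 * IEM_ACCESS_TYPE_WRITE) yields uErr = X86_TRAP_PF_P | X86_TRAP_PF_US
 * | X86_TRAP_PF_RW, whereas a CPL 0 instruction fetch from a not-present page
 * with CR4.PAE and EFER.NXE set yields just X86_TRAP_PF_ID.
 */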
5123
5124#ifdef IEM_WITH_SETJMP
5125/** \#PF(n) - 0e, longjmp. */
5126DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
5127 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
5128{
5129 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
5130}
5131#endif
5132
5133
5134/** \#MF(0) - 10. */
5135VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
5136{
5137 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
5138 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5139
5140 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
5141 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
5142 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
5143}
5144
5145#ifdef IEM_WITH_SETJMP
5146/** \#MF(0) - 10, longjmp. */
5147DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
5148{
5149 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
5150}
5151#endif
5152
5153
5154/** \#AC(0) - 11. */
5155VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
5156{
5157 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5158}
5159
5160#ifdef IEM_WITH_SETJMP
5161/** \#AC(0) - 11, longjmp. */
5162DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
5163{
5164 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
5165}
5166#endif
5167
5168
5169/** \#XF(0)/\#XM(0) - 19. */
5170VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
5171{
5172 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5173}
5174
5175
5176#ifdef IEM_WITH_SETJMP
5177/** \#XF(0)/\#XM(0) - 19, longjmp. */
5178DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
5179{
5180 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
5181}
5182#endif
5183
5184
5185/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
5186IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5187{
5188 NOREF(cbInstr);
5189 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5190}
5191
5192
5193/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
5194IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5195{
5196 NOREF(cbInstr);
5197 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5198}
5199
5200
5201/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
5202IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5203{
5204 NOREF(cbInstr);
5205 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5206}
5207
5208
5209/** @} */
5210
5211/** @name Common opcode decoders.
5212 * @{
5213 */
5214//#include <iprt/mem.h>
5215
5216/**
5217 * Used to add extra details about a stub case.
5218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5219 */
5220void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
5221{
5222#if defined(LOG_ENABLED) && defined(IN_RING3)
5223 PVM pVM = pVCpu->CTX_SUFF(pVM);
5224 char szRegs[4096];
5225 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5226 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5227 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5228 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5229 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5230 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5231 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5232 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5233 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5234 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5235 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5236 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5237 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5238 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5239 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5240 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5241 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5242 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5243 " efer=%016VR{efer}\n"
5244 " pat=%016VR{pat}\n"
5245 " sf_mask=%016VR{sf_mask}\n"
5246 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5247 " lstar=%016VR{lstar}\n"
5248 " star=%016VR{star} cstar=%016VR{cstar}\n"
5249 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5250 );
5251
5252 char szInstr[256];
5253 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5254 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5255 szInstr, sizeof(szInstr), NULL);
5256
5257 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5258#else
5259 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
5260#endif
5261}
5262
5263/** @} */
5264
5265
5266
5267/** @name Register Access.
5268 * @{
5269 */
5270
5271/**
5272 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5273 *
5274 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5275 * segment limit.
5276 *
5277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5278 * @param cbInstr Instruction size.
5279 * @param offNextInstr The offset of the next instruction.
5280 * @param enmEffOpSize Effective operand size.
5281 */
5282VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
5283 IEMMODE enmEffOpSize) RT_NOEXCEPT
5284{
5285 switch (enmEffOpSize)
5286 {
5287 case IEMMODE_16BIT:
5288 {
5289 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
5290 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5291 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
5292 pVCpu->cpum.GstCtx.rip = uNewIp;
5293 else
5294 return iemRaiseGeneralProtectionFault0(pVCpu);
5295 break;
5296 }
5297
5298 case IEMMODE_32BIT:
5299 {
5300 Assert(!IEM_IS_64BIT_CODE(pVCpu));
5301 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
5302
5303 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
5304 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5305 pVCpu->cpum.GstCtx.rip = uNewEip;
5306 else
5307 return iemRaiseGeneralProtectionFault0(pVCpu);
5308 break;
5309 }
5310
5311 case IEMMODE_64BIT:
5312 {
5313 Assert(IEM_IS_64BIT_CODE(pVCpu));
5314
5315 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5316 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5317 pVCpu->cpum.GstCtx.rip = uNewRip;
5318 else
5319 return iemRaiseGeneralProtectionFault0(pVCpu);
5320 break;
5321 }
5322
5323 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5324 }
5325
5326#ifndef IEM_WITH_CODE_TLB
5327 /* Flush the prefetch buffer. */
5328 pVCpu->iem.s.cbOpcode = cbInstr;
5329#endif
5330
5331 /*
5332 * Clear RF and finish the instruction (maybe raise #DB).
5333 */
5334 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5335}
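
/* Illustrative example for the 16-bit case above (figures assumed): a two byte
 * JMP with offNextInstr = -4 executed at IP=0x0001 gives uNewIp = 0x0001 + 2
 * - 4 = 0xffff, as the addition wraps at 16 bits; the wrapped IP is then
 * checked against the CS limit like any other target.
 */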
5336
5337
5338/**
5339 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5340 *
5341 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5342 * segment limit.
5343 *
5344 * @returns Strict VBox status code.
5345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5346 * @param cbInstr Instruction size.
5347 * @param offNextInstr The offset of the next instruction.
5348 */
5349VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
5350{
5351 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5352
5353 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
5354 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5355 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
5356 pVCpu->cpum.GstCtx.rip = uNewIp;
5357 else
5358 return iemRaiseGeneralProtectionFault0(pVCpu);
5359
5360#ifndef IEM_WITH_CODE_TLB
5361 /* Flush the prefetch buffer. */
5362 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5363#endif
5364
5365 /*
5366 * Clear RF and finish the instruction (maybe raise #DB).
5367 */
5368 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5369}
5370
5371
5372/**
5373 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5374 *
5375 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5376 * segment limit.
5377 *
5378 * @returns Strict VBox status code.
5379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5380 * @param cbInstr Instruction size.
5381 * @param offNextInstr The offset of the next instruction.
5382 * @param enmEffOpSize Effective operand size.
5383 */
5384VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5385 IEMMODE enmEffOpSize) RT_NOEXCEPT
5386{
5387 if (enmEffOpSize == IEMMODE_32BIT)
5388 {
5389 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
5390
5391 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
5392 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5393 pVCpu->cpum.GstCtx.rip = uNewEip;
5394 else
5395 return iemRaiseGeneralProtectionFault0(pVCpu);
5396 }
5397 else
5398 {
5399 Assert(enmEffOpSize == IEMMODE_64BIT);
5400
5401 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5402 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5403 pVCpu->cpum.GstCtx.rip = uNewRip;
5404 else
5405 return iemRaiseGeneralProtectionFault0(pVCpu);
5406 }
5407
5408#ifndef IEM_WITH_CODE_TLB
5409 /* Flush the prefetch buffer. */
5410 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5411#endif
5412
5413 /*
5414 * Clear RF and finish the instruction (maybe raise #DB).
5415 */
5416 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5417}
5418
5419/** @} */
5420
5421
5422/** @name FPU access and helpers.
5423 *
5424 * @{
5425 */
5426
5427/**
5428 * Updates the x87.DS and FPUDP registers.
5429 *
5430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5431 * @param pFpuCtx The FPU context.
5432 * @param iEffSeg The effective segment register.
5433 * @param GCPtrEff The effective address relative to @a iEffSeg.
5434 */
5435DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5436{
5437 RTSEL sel;
5438 switch (iEffSeg)
5439 {
5440 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
5441 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
5442 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
5443 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
5444 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
5445 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
5446 default:
5447 AssertMsgFailed(("%d\n", iEffSeg));
5448 sel = pVCpu->cpum.GstCtx.ds.Sel;
5449 }
5450 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5451 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5452 {
5453 pFpuCtx->DS = 0;
5454 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
5455 }
5456 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
5457 {
5458 pFpuCtx->DS = sel;
5459 pFpuCtx->FPUDP = GCPtrEff;
5460 }
5461 else
5462 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
5463}
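
/* Illustrative example for the real/V86-mode path above (figures assumed):
 * with DS=0x2345 and GCPtrEff=0x0010 the FPU data pointer is recorded as the
 * linear address 0x2345 * 16 + 0x10 = 0x23460 in FPUDP, with the DS selector
 * field left at zero.
 */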
5464
5465
5466/**
5467 * Rotates the stack registers in the push direction.
5468 *
5469 * @param pFpuCtx The FPU context.
5470 * @remarks This is a complete waste of time, but fxsave stores the registers in
5471 * stack order.
5472 */
5473DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5474{
5475 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5476 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5477 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5478 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5479 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5480 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5481 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5482 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5483 pFpuCtx->aRegs[0].r80 = r80Tmp;
5484}
5485
5486
5487/**
5488 * Rotates the stack registers in the pop direction.
5489 *
5490 * @param pFpuCtx The FPU context.
5491 * @remarks This is a complete waste of time, but fxsave stores the registers in
5492 * stack order.
5493 */
5494DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5495{
5496 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5497 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5498 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5499 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5500 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5501 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5502 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5503 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5504 pFpuCtx->aRegs[7].r80 = r80Tmp;
5505}
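
/* Illustrative note on the two rotation helpers above: pFpuCtx->aRegs[]
 * mirrors the FXSAVE image and therefore holds the registers in stack order,
 * i.e. aRegs[i] is ST(i) rather than the physical register Ri. A push thus
 * shifts every register one slot up (aRegs[0] -> aRegs[1], etc.) so the new
 * value can become ST(0) in aRegs[0], and a pop does the inverse.
 */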
5506
5507
5508/**
5509 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5510 * exception prevents it.
5511 *
5512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5513 * @param pResult The FPU operation result to push.
5514 * @param pFpuCtx The FPU context.
5515 */
5516static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5517{
5518 /* Update FSW and bail if there are pending exceptions afterwards. */
5519 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5520 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5521 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5522 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5523 {
5524 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5525 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
5526 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5527 pFpuCtx->FSW = fFsw;
5528 return;
5529 }
5530
5531 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5532 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5533 {
5534 /* All is fine, push the actual value. */
5535 pFpuCtx->FTW |= RT_BIT(iNewTop);
5536 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5537 }
5538 else if (pFpuCtx->FCW & X86_FCW_IM)
5539 {
5540 /* Masked stack overflow, push QNaN. */
5541 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5542 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5543 }
5544 else
5545 {
5546 /* Raise stack overflow, don't push anything. */
5547 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5548 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5549 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5550 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5551 return;
5552 }
5553
5554 fFsw &= ~X86_FSW_TOP_MASK;
5555 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5556 pFpuCtx->FSW = fFsw;
5557
5558 iemFpuRotateStackPush(pFpuCtx);
5559 RT_NOREF(pVCpu);
5560}
5561
5562
5563/**
5564 * Stores a result in a FPU register and updates the FSW and FTW.
5565 *
5566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5567 * @param pFpuCtx The FPU context.
5568 * @param pResult The result to store.
5569 * @param iStReg Which FPU register to store it in.
5570 */
5571static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5572{
5573 Assert(iStReg < 8);
5574 uint16_t fNewFsw = pFpuCtx->FSW;
5575 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
5576 fNewFsw &= ~X86_FSW_C_MASK;
5577 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5578 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5579 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5580 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5581 pFpuCtx->FSW = fNewFsw;
5582 pFpuCtx->FTW |= RT_BIT(iReg);
5583 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5584 RT_NOREF(pVCpu);
5585}
5586
5587
5588/**
5589 * Only updates the FPU status word (FSW) with the result of the current
5590 * instruction.
5591 *
5592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5593 * @param pFpuCtx The FPU context.
5594 * @param u16FSW The FSW output of the current instruction.
5595 */
5596static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
5597{
5598 uint16_t fNewFsw = pFpuCtx->FSW;
5599 fNewFsw &= ~X86_FSW_C_MASK;
5600 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
5601 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5602 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5603 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5604 pFpuCtx->FSW = fNewFsw;
5605 RT_NOREF(pVCpu);
5606}
5607
5608
5609/**
5610 * Pops one item off the FPU stack if no pending exception prevents it.
5611 *
5612 * @param pFpuCtx The FPU context.
5613 */
5614static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5615{
5616 /* Check pending exceptions. */
5617 uint16_t uFSW = pFpuCtx->FSW;
5618 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5619 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5620 return;
5621
5622 /* TOP++ (popping the x87 stack increments the top-of-stack field). */
5623 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5624 uFSW &= ~X86_FSW_TOP_MASK;
5625 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5626 pFpuCtx->FSW = uFSW;
5627
5628 /* Mark the previous ST0 as empty. */
5629 iOldTop >>= X86_FSW_TOP_SHIFT;
5630 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5631
5632 /* Rotate the registers. */
5633 iemFpuRotateStackPop(pFpuCtx);
5634}
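
/* Note on the TOP arithmetic above and in the push paths: the top-of-stack
 * field is only 3 bits wide, so adding 9 (equivalent to +1) and masking with
 * X86_FSW_TOP_MASK increments TOP modulo 8 (pop), while adding 7 and masking
 * with X86_FSW_TOP_SMASK decrements it modulo 8 (push).
 */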
5635
5636
5637/**
5638 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5639 *
5640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5641 * @param pResult The FPU operation result to push.
5642 * @param uFpuOpcode The FPU opcode value.
5643 */
5644void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5645{
5646 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5647 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5648 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5649}
5650
5651
5652/**
5653 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5654 * and sets FPUDP and FPUDS.
5655 *
5656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5657 * @param pResult The FPU operation result to push.
5658 * @param iEffSeg The effective segment register.
5659 * @param GCPtrEff The effective address relative to @a iEffSeg.
5660 * @param uFpuOpcode The FPU opcode value.
5661 */
5662void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5663 uint16_t uFpuOpcode) RT_NOEXCEPT
5664{
5665 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5666 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5667 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5668 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5669}
5670
5671
5672/**
5673 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5674 * unless a pending exception prevents it.
5675 *
5676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5677 * @param pResult The FPU operation result to store and push.
5678 * @param uFpuOpcode The FPU opcode value.
5679 */
5680void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5681{
5682 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5683 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5684
5685 /* Update FSW and bail if there are pending exceptions afterwards. */
5686 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5687 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5688 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5689 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5690 {
5691 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5692 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5693 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5694 pFpuCtx->FSW = fFsw;
5695 return;
5696 }
5697
5698 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5699 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5700 {
5701 /* All is fine, push the actual value. */
5702 pFpuCtx->FTW |= RT_BIT(iNewTop);
5703 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5704 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5705 }
5706 else if (pFpuCtx->FCW & X86_FCW_IM)
5707 {
5708 /* Masked stack overflow, push QNaN. */
5709 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5710 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5711 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5712 }
5713 else
5714 {
5715 /* Raise stack overflow, don't push anything. */
5716 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5717 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5718 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5719 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5720 return;
5721 }
5722
5723 fFsw &= ~X86_FSW_TOP_MASK;
5724 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5725 pFpuCtx->FSW = fFsw;
5726
5727 iemFpuRotateStackPush(pFpuCtx);
5728}
5729
5730
5731/**
5732 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5733 * FOP.
5734 *
5735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5736 * @param pResult The result to store.
5737 * @param iStReg Which FPU register to store it in.
5738 * @param uFpuOpcode The FPU opcode value.
5739 */
5740void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5741{
5742 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5743 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5744 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5745}
5746
5747
5748/**
5749 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5750 * FOP, and then pops the stack.
5751 *
5752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5753 * @param pResult The result to store.
5754 * @param iStReg Which FPU register to store it in.
5755 * @param uFpuOpcode The FPU opcode value.
5756 */
5757void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5758{
5759 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5760 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5761 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5762 iemFpuMaybePopOne(pFpuCtx);
5763}
5764
5765
5766/**
5767 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5768 * FPUDP, and FPUDS.
5769 *
5770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5771 * @param pResult The result to store.
5772 * @param iStReg Which FPU register to store it in.
5773 * @param iEffSeg The effective memory operand selector register.
5774 * @param GCPtrEff The effective memory operand offset.
5775 * @param uFpuOpcode The FPU opcode value.
5776 */
5777void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5778 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5779{
5780 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5781 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5782 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5783 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5784}
5785
5786
5787/**
5788 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5789 * FPUDP, and FPUDS, and then pops the stack.
5790 *
5791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5792 * @param pResult The result to store.
5793 * @param iStReg Which FPU register to store it in.
5794 * @param iEffSeg The effective memory operand selector register.
5795 * @param GCPtrEff The effective memory operand offset.
5796 * @param uFpuOpcode The FPU opcode value.
5797 */
5798void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5799 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5800{
5801 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5802 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5803 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5804 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5805 iemFpuMaybePopOne(pFpuCtx);
5806}
5807
5808
5809/**
5810 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5811 *
5812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5813 * @param uFpuOpcode The FPU opcode value.
5814 */
5815void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5816{
5817 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5818 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5819}
5820
5821
5822/**
5823 * Updates the FSW, FOP, FPUIP, and FPUCS.
5824 *
5825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5826 * @param u16FSW The FSW from the current instruction.
5827 * @param uFpuOpcode The FPU opcode value.
5828 */
5829void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5830{
5831 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5832 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5833 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5834}
5835
5836
5837/**
5838 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5839 *
5840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5841 * @param u16FSW The FSW from the current instruction.
5842 * @param uFpuOpcode The FPU opcode value.
5843 */
5844void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5845{
5846 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5847 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5848 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5849 iemFpuMaybePopOne(pFpuCtx);
5850}
5851
5852
5853/**
5854 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5855 *
5856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5857 * @param u16FSW The FSW from the current instruction.
5858 * @param iEffSeg The effective memory operand selector register.
5859 * @param GCPtrEff The effective memory operand offset.
5860 * @param uFpuOpcode The FPU opcode value.
5861 */
5862void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5863{
5864 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5865 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5866 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5867 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5868}
5869
5870
5871/**
5872 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5873 *
5874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5875 * @param u16FSW The FSW from the current instruction.
5876 * @param uFpuOpcode The FPU opcode value.
5877 */
5878void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5879{
5880 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5881 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5882 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5883 iemFpuMaybePopOne(pFpuCtx);
5884 iemFpuMaybePopOne(pFpuCtx);
5885}
5886
5887
5888/**
5889 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5890 *
5891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5892 * @param u16FSW The FSW from the current instruction.
5893 * @param iEffSeg The effective memory operand selector register.
5894 * @param GCPtrEff The effective memory operand offset.
5895 * @param uFpuOpcode The FPU opcode value.
5896 */
5897void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5898 uint16_t uFpuOpcode) RT_NOEXCEPT
5899{
5900 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5901 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5902 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5903 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5904 iemFpuMaybePopOne(pFpuCtx);
5905}
5906
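/*
 * Illustrative usage sketch (informational comment only, not part of the build):
 * the typical helper sequence for an FPU instruction that stores to memory,
 * assuming hypothetical locals uFsw, iEffSeg, GCPtrEff and uFpuOpcode set up
 * by the instruction implementation.
 *
 *      // FIST m32 style: record the data pointer and merge the new FSW.
 *      iemFpuUpdateFSWWithMemOp(pVCpu, uFsw, iEffSeg, GCPtrEff, uFpuOpcode);
 *      // FISTP m32 style: same, but pop ST(0) afterwards.
 *      iemFpuUpdateFSWWithMemOpThenPop(pVCpu, uFsw, iEffSeg, GCPtrEff, uFpuOpcode);
 */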
5907
5908/**
5909 * Worker routine for raising an FPU stack underflow exception.
5910 *
5911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5912 * @param pFpuCtx The FPU context.
5913 * @param iStReg The stack register being accessed.
5914 */
5915static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5916{
5917 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5918 if (pFpuCtx->FCW & X86_FCW_IM)
5919 {
5920 /* Masked underflow. */
5921 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5922 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5923 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5924 if (iStReg != UINT8_MAX)
5925 {
5926 pFpuCtx->FTW |= RT_BIT(iReg);
5927 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5928 }
5929 }
5930 else
5931 {
5932 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5933 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5934 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5935 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5936 }
5937 RT_NOREF(pVCpu);
5938}
5939
5940
5941/**
5942 * Raises an FPU stack underflow exception.
5943 *
5944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5945 * @param iStReg The destination register that should be loaded
5946 * with QNaN if \#IS is not masked. Specify
5947 * UINT8_MAX if none (like for fcom).
5948 * @param uFpuOpcode The FPU opcode value.
5949 */
5950void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5951{
5952 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5953 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5954 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5955}
5956
5957
5958void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5959{
5960 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5961 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5962 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5963 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5964}
5965
5966
5967void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5968{
5969 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5970 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5971 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5972 iemFpuMaybePopOne(pFpuCtx);
5973}
5974
5975
5976void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5977 uint16_t uFpuOpcode) RT_NOEXCEPT
5978{
5979 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5980 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5981 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5982 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5983 iemFpuMaybePopOne(pFpuCtx);
5984}
5985
5986
5987void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5988{
5989 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5990 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5991 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5992 iemFpuMaybePopOne(pFpuCtx);
5993 iemFpuMaybePopOne(pFpuCtx);
5994}
5995
5996
5997void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5998{
5999 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
6000 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
6001
6002 if (pFpuCtx->FCW & X86_FCW_IM)
6003 {
6004 /* Masked underflow - Push QNaN. */
6005 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6006 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6007 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6008 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6009 pFpuCtx->FTW |= RT_BIT(iNewTop);
6010 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6011 iemFpuRotateStackPush(pFpuCtx);
6012 }
6013 else
6014 {
6015 /* Exception pending - don't change TOP or the register stack. */
6016 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6017 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6018 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
6019 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
6020 }
6021}
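/*
 * Informational note on the TOP arithmetic above: a push decrements TOP modulo
 * 8, so adding 7 and masking with X86_FSW_TOP_SMASK is equivalent to
 * subtracting one with wrap-around. For example, with the current TOP = 0:
 *
 *      iNewTop = (0 + 7) & X86_FSW_TOP_SMASK;  // = 7, i.e. the stack wraps to ST(7)
 */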
6022
6023
6024void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
6025{
6026 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
6027 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
6028
6029 if (pFpuCtx->FCW & X86_FCW_IM)
6030 {
6031 /* Masked underflow - Push QNaN. */
6032 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6033 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6034 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6035 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6036 pFpuCtx->FTW |= RT_BIT(iNewTop);
6037 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6038 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6039 iemFpuRotateStackPush(pFpuCtx);
6040 }
6041 else
6042 {
6043 /* Exception pending - don't change TOP or the register stack. */
6044 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6045 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6046 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
6047 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
6048 }
6049}
6050
6051
6052/**
6053 * Worker routine for raising an FPU stack overflow exception on a push.
6054 *
6055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6056 * @param pFpuCtx The FPU context.
6057 */
6058static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
6059{
6060 if (pFpuCtx->FCW & X86_FCW_IM)
6061 {
6062 /* Masked overflow. */
6063 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6064 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6065 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
6066 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6067 pFpuCtx->FTW |= RT_BIT(iNewTop);
6068 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6069 iemFpuRotateStackPush(pFpuCtx);
6070 }
6071 else
6072 {
6073 /* Exception pending - don't change TOP or the register stack. */
6074 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6075 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6076 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
6077 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
6078 }
6079 RT_NOREF(pVCpu);
6080}
6081
6082
6083/**
6084 * Raises an FPU stack overflow exception on a push.
6085 *
6086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6087 * @param uFpuOpcode The FPU opcode value.
6088 */
6089void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
6090{
6091 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
6092 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
6093 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
6094}
6095
6096
6097/**
6098 * Raises an FPU stack overflow exception on a push with a memory operand.
6099 *
6100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6101 * @param iEffSeg The effective memory operand selector register.
6102 * @param GCPtrEff The effective memory operand offset.
6103 * @param uFpuOpcode The FPU opcode value.
6104 */
6105void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
6106{
6107 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
6108 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
6109 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
6110 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
6111}
6112
6113/** @} */
6114
6115
6116/** @name Memory access.
6117 *
6118 * @{
6119 */
6120
6121#undef LOG_GROUP
6122#define LOG_GROUP LOG_GROUP_IEM_MEM
6123
6124/**
6125 * Applies the segment limit, base and attributes.
6126 *
6127 * This may raise a \#GP or \#SS.
6128 *
6129 * @returns VBox strict status code.
6130 *
6131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6132 * @param fAccess The kind of access which is being performed.
6133 * @param iSegReg The index of the segment register to apply.
6134 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6135 * TSS, ++).
6136 * @param cbMem The access size.
6137 * @param pGCPtrMem Pointer to the guest memory address to apply
6138 * segmentation to. Input and output parameter.
6139 */
6140VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
6141{
6142 if (iSegReg == UINT8_MAX)
6143 return VINF_SUCCESS;
6144
6145 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6146 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
6147 switch (IEM_GET_CPU_MODE(pVCpu))
6148 {
6149 case IEMMODE_16BIT:
6150 case IEMMODE_32BIT:
6151 {
6152 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6153 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6154
6155 if ( pSel->Attr.n.u1Present
6156 && !pSel->Attr.n.u1Unusable)
6157 {
6158 Assert(pSel->Attr.n.u1DescType);
6159 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6160 {
6161 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6162 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6163 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
6164
6165 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
6166 {
6167 /** @todo CPL check. */
6168 }
6169
6170 /*
6171 * There are two kinds of data selectors, normal and expand down.
6172 */
6173 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6174 {
6175 if ( GCPtrFirst32 > pSel->u32Limit
6176 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6177 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
6178 }
6179 else
6180 {
6181 /*
6182 * The upper boundary is defined by the B bit, not the G bit!
6183 */
6184 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6185 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6186 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
6187 }
6188 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6189 }
6190 else
6191 {
6192 /*
6193 * A code selector can usually be used to read through it; writing is
6194 * only permitted in real and V8086 mode.
6195 */
6196 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6197 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6198 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6199 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
6200 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
6201
6202 if ( GCPtrFirst32 > pSel->u32Limit
6203 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6204 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
6205
6206 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
6207 {
6208 /** @todo CPL check. */
6209 }
6210
6211 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6212 }
6213 }
6214 else
6215 return iemRaiseGeneralProtectionFault0(pVCpu);
6216 return VINF_SUCCESS;
6217 }
6218
6219 case IEMMODE_64BIT:
6220 {
6221 RTGCPTR GCPtrMem = *pGCPtrMem;
6222 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6223 *pGCPtrMem = GCPtrMem + pSel->u64Base;
6224
6225 Assert(cbMem >= 1);
6226 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
6227 return VINF_SUCCESS;
6228 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
6229 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
6230 return iemRaiseGeneralProtectionFault0(pVCpu);
6231 }
6232
6233 default:
6234 AssertFailedReturn(VERR_IEM_IPE_7);
6235 }
6236}
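/*
 * Illustrative usage sketch (informational comment only, not part of the build):
 * how a caller typically applies segmentation before translating further,
 * assuming hypothetical locals for the segment index, access size and the
 * effective address produced by the decoder.
 *
 *      RTGCPTR GCPtrMem = GCPtrEff;
 *      VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, iSegReg, cbMem, &GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;            // #GP or #SS has already been raised.
 *      // GCPtrMem now holds the linear address (segment base applied, limits checked).
 */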
6237
6238
6239/**
6240 * Translates a virtual address to a physical address and checks if we can
6241 * access the page as specified.
6242 *
6243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6244 * @param GCPtrMem The virtual address.
6245 * @param cbAccess The access size, for raising \#PF correctly for
6246 * FXSAVE and such.
6247 * @param fAccess The intended access.
6248 * @param pGCPhysMem Where to return the physical address.
6249 */
6250VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
6251 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
6252{
6253 /** @todo Need a different PGM interface here. We're currently using
6254 * generic / REM interfaces. This won't cut it for R0. */
6255 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
6256 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
6257 * here. */
6258 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6259 PGMPTWALKFAST WalkFast;
6260 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6261 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6262 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6263 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6264 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6265 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6266 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6267 fQPage |= PGMQPAGE_F_USER_MODE;
6268 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6269 if (RT_SUCCESS(rc))
6270 {
6271 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6272
6273 /* If the page is writable and does not have the no-exec bit set, all
6274 access is allowed. Otherwise we'll have to check more carefully... */
6275 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
6276 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
6277 || (WalkFast.fEffective & X86_PTE_RW)
6278 || ( ( IEM_GET_CPL(pVCpu) != 3
6279 || (fAccess & IEM_ACCESS_WHAT_SYS))
6280 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
6281 && ( (WalkFast.fEffective & X86_PTE_US)
6282 || IEM_GET_CPL(pVCpu) != 3
6283 || (fAccess & IEM_ACCESS_WHAT_SYS) )
6284 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
6285 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
6286 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
6287 )
6288 );
6289
6290 /* PGMGstQueryPageFast sets the A & D bits. */
6291 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6292 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
6293
6294 *pGCPhysMem = WalkFast.GCPhys;
6295 return VINF_SUCCESS;
6296 }
6297
6298 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6299 /** @todo Check unassigned memory in unpaged mode. */
6300#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6301 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6302 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6303#endif
6304 *pGCPhysMem = NIL_RTGCPHYS;
6305 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
6306}
6307
6308#if 0 /*unused*/
6309/**
6310 * Looks up a memory mapping entry.
6311 *
6312 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6314 * @param pvMem The memory address.
6315 * @param fAccess The kind of access to look up.
6316 */
6317DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
6318{
6319 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6320 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6321 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
6322 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6323 return 0;
6324 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
6325 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6326 return 1;
6327 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
6328 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6329 return 2;
6330 return VERR_NOT_FOUND;
6331}
6332#endif
6333
6334/**
6335 * Finds a free memmap entry when the iNextMapping hint doesn't point at a free one.
6336 *
6337 * @returns Memory mapping index, 1024 on failure.
6338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6339 */
6340static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
6341{
6342 /*
6343 * The easy case.
6344 */
6345 if (pVCpu->iem.s.cActiveMappings == 0)
6346 {
6347 pVCpu->iem.s.iNextMapping = 1;
6348 return 0;
6349 }
6350
6351 /* There should be enough mappings for all instructions. */
6352 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
6353
6354 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
6355 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6356 return i;
6357
6358 AssertFailedReturn(1024);
6359}
6360
6361
6362/**
6363 * Commits a bounce buffer that needs writing back and unmaps it.
6364 *
6365 * @returns Strict VBox status code.
6366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6367 * @param iMemMap The index of the buffer to commit.
6368 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
6369 * Always false in ring-3, obviously.
6370 */
6371static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
6372{
6373 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6374 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6375#ifdef IN_RING3
6376 Assert(!fPostponeFail);
6377 RT_NOREF_PV(fPostponeFail);
6378#endif
6379
6380 /*
6381 * Do the writing.
6382 */
6383 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6384 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
6385 {
6386 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
6387 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6388 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6389 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6390 {
6391 /*
6392 * Carefully and efficiently dealing with access handler return
6393 * codes makes this a little bloated.
6394 */
6395 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6396 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6397 pbBuf,
6398 cbFirst,
6399 PGMACCESSORIGIN_IEM);
6400 if (rcStrict == VINF_SUCCESS)
6401 {
6402 if (cbSecond)
6403 {
6404 rcStrict = PGMPhysWrite(pVM,
6405 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6406 pbBuf + cbFirst,
6407 cbSecond,
6408 PGMACCESSORIGIN_IEM);
6409 if (rcStrict == VINF_SUCCESS)
6410 { /* nothing */ }
6411 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6412 {
6413 LogEx(LOG_GROUP_IEM,
6414 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6415 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6416 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6417 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6418 }
6419#ifndef IN_RING3
6420 else if (fPostponeFail)
6421 {
6422 LogEx(LOG_GROUP_IEM,
6423 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6425 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6426 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6427 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6428 return iemSetPassUpStatus(pVCpu, rcStrict);
6429 }
6430#endif
6431 else
6432 {
6433 LogEx(LOG_GROUP_IEM,
6434 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6435 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6436 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6437 return rcStrict;
6438 }
6439 }
6440 }
6441 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6442 {
6443 if (!cbSecond)
6444 {
6445 LogEx(LOG_GROUP_IEM,
6446 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6447 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6448 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6449 }
6450 else
6451 {
6452 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6454 pbBuf + cbFirst,
6455 cbSecond,
6456 PGMACCESSORIGIN_IEM);
6457 if (rcStrict2 == VINF_SUCCESS)
6458 {
6459 LogEx(LOG_GROUP_IEM,
6460 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6461 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6462 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6463 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6464 }
6465 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6466 {
6467 LogEx(LOG_GROUP_IEM,
6468 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6469 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6470 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6471 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6472 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6473 }
6474#ifndef IN_RING3
6475 else if (fPostponeFail)
6476 {
6477 LogEx(LOG_GROUP_IEM,
6478 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6480 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6481 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6482 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6483 return iemSetPassUpStatus(pVCpu, rcStrict);
6484 }
6485#endif
6486 else
6487 {
6488 LogEx(LOG_GROUP_IEM,
6489 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6490 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6491 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6492 return rcStrict2;
6493 }
6494 }
6495 }
6496#ifndef IN_RING3
6497 else if (fPostponeFail)
6498 {
6499 LogEx(LOG_GROUP_IEM,
6500 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6501 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6502 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6503 if (!cbSecond)
6504 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6505 else
6506 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6507 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6508 return iemSetPassUpStatus(pVCpu, rcStrict);
6509 }
6510#endif
6511 else
6512 {
6513 LogEx(LOG_GROUP_IEM,
6514 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6515 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6516 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6517 return rcStrict;
6518 }
6519 }
6520 else
6521 {
6522 /*
6523 * No access handlers, much simpler.
6524 */
6525 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6526 if (RT_SUCCESS(rc))
6527 {
6528 if (cbSecond)
6529 {
6530 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6531 if (RT_SUCCESS(rc))
6532 { /* likely */ }
6533 else
6534 {
6535 LogEx(LOG_GROUP_IEM,
6536 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6537 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6538 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6539 return rc;
6540 }
6541 }
6542 }
6543 else
6544 {
6545 LogEx(LOG_GROUP_IEM,
6546 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6547 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6548 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6549 return rc;
6550 }
6551 }
6552 }
6553
6554#if defined(IEM_LOG_MEMORY_WRITES)
6555 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6556 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6557 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6558 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6559 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6560 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6561
6562 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6563 g_cbIemWrote = cbWrote;
6564 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6565#endif
6566
6567 /*
6568 * Free the mapping entry.
6569 */
6570 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6571 Assert(pVCpu->iem.s.cActiveMappings != 0);
6572 pVCpu->iem.s.cActiveMappings--;
6573 return VINF_SUCCESS;
6574}
6575
6576
6577/**
6578 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
6579 */
6580DECL_FORCE_INLINE(uint32_t)
6581iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
6582{
6583 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
6584 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6585 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6586 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6587}
6588
6589
6590/**
6591 * iemMemMap worker that deals with a request crossing pages.
6592 */
6593static VBOXSTRICTRC
6594iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6595 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6596{
6597 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
6598 Assert(cbMem <= GUEST_PAGE_SIZE);
6599
6600 /*
6601 * Do the address translations.
6602 */
6603 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6604 RTGCPHYS GCPhysFirst;
6605 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6606 if (rcStrict != VINF_SUCCESS)
6607 return rcStrict;
6608 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6609
6610 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6611 RTGCPHYS GCPhysSecond;
6612 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6613 cbSecondPage, fAccess, &GCPhysSecond);
6614 if (rcStrict != VINF_SUCCESS)
6615 return rcStrict;
6616 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6617 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6618
6619 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6620
6621 /*
6622 * Check for data breakpoints.
6623 */
6624 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
6625 { /* likely */ }
6626 else
6627 {
6628 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
6629 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6630 cbSecondPage, fAccess);
6631 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6632 if (fDataBps > 1)
6633 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6634 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6635 }
6636
6637 /*
6638 * Read in the current memory content if it's a read, execute or partial
6639 * write access.
6640 */
6641 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6642
6643 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6644 {
6645 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6646 {
6647 /*
6648 * We must carefully deal with access handler status codes here,
6649 * which makes the code a bit bloated.
6650 */
6651 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6652 if (rcStrict == VINF_SUCCESS)
6653 {
6654 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6655 if (rcStrict == VINF_SUCCESS)
6656 { /*likely */ }
6657 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6658 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6659 else
6660 {
6661 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6662 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6663 return rcStrict;
6664 }
6665 }
6666 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6667 {
6668 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6669 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6670 {
6671 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6672 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6673 }
6674 else
6675 {
6676 LogEx(LOG_GROUP_IEM,
6677 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6678 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
6679 return rcStrict2;
6680 }
6681 }
6682 else
6683 {
6684 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6685 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6686 return rcStrict;
6687 }
6688 }
6689 else
6690 {
6691 /*
6692 * No informational status codes here, much more straightforward.
6693 */
6694 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6695 if (RT_SUCCESS(rc))
6696 {
6697 Assert(rc == VINF_SUCCESS);
6698 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6699 if (RT_SUCCESS(rc))
6700 Assert(rc == VINF_SUCCESS);
6701 else
6702 {
6703 LogEx(LOG_GROUP_IEM,
6704 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6705 return rc;
6706 }
6707 }
6708 else
6709 {
6710 LogEx(LOG_GROUP_IEM,
6711 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6712 return rc;
6713 }
6714 }
6715 }
6716#ifdef VBOX_STRICT
6717 else
6718 memset(pbBuf, 0xcc, cbMem);
6719 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6720 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6721#endif
6722 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6723
6724 /*
6725 * Commit the bounce buffer entry.
6726 */
6727 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6728 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6729 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6730 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6731 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6732 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6733 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6734 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6735 pVCpu->iem.s.cActiveMappings++;
6736
6737 *ppvMem = pbBuf;
6738 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6739 return VINF_SUCCESS;
6740}
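/*
 * Worked example (informational): assuming the usual 4 KiB guest page, an
 * 8 byte access at a linear address with page offset 0xffa is split above into
 * cbFirstPage = 0x1000 - 0xffa = 6 bytes from the first page and
 * cbSecondPage = 8 - 6 = 2 bytes from the following page, both served out of
 * the same bounce buffer so the caller still sees one linear mapping.
 */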
6741
6742
6743/**
6744 * iemMemMap worker that deals with iemMemPageMap failures.
6745 */
6746static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6747 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6748{
6749 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
6750
6751 /*
6752 * Filter out conditions we can handle and the ones which shouldn't happen.
6753 */
6754 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6755 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6756 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6757 {
6758 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6759 return rcMap;
6760 }
6761 pVCpu->iem.s.cPotentialExits++;
6762
6763 /*
6764 * Read in the current memory content if it's a read, execute or partial
6765 * write access.
6766 */
6767 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6768 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6769 {
6770 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6771 memset(pbBuf, 0xff, cbMem);
6772 else
6773 {
6774 int rc;
6775 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6776 {
6777 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6778 if (rcStrict == VINF_SUCCESS)
6779 { /* nothing */ }
6780 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6781 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6782 else
6783 {
6784 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6785 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6786 return rcStrict;
6787 }
6788 }
6789 else
6790 {
6791 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6792 if (RT_SUCCESS(rc))
6793 { /* likely */ }
6794 else
6795 {
6796 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6797 GCPhysFirst, rc));
6798 return rc;
6799 }
6800 }
6801 }
6802 }
6803#ifdef VBOX_STRICT
6804 else
6805 memset(pbBuf, 0xcc, cbMem);
6806#endif
6807#ifdef VBOX_STRICT
6808 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6809 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6810#endif
6811
6812 /*
6813 * Commit the bounce buffer entry.
6814 */
6815 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6816 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6817 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6818 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6819 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6820 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6821 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6822 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6823 pVCpu->iem.s.cActiveMappings++;
6824
6825 *ppvMem = pbBuf;
6826 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6827 return VINF_SUCCESS;
6828}
6829
6830
6831
6832/**
6833 * Maps the specified guest memory for the given kind of access.
6834 *
6835 * This may be using bounce buffering of the memory if it's crossing a page
6836 * boundary or if there is an access handler installed for any of it. Because
6837 * of lock prefix guarantees, we're in for some extra clutter when this
6838 * happens.
6839 *
6840 * This may raise a \#GP, \#SS, \#PF or \#AC.
6841 *
6842 * @returns VBox strict status code.
6843 *
6844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6845 * @param ppvMem Where to return the pointer to the mapped memory.
6846 * @param pbUnmapInfo Where to return unmap info to be passed to
6847 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6848 * done.
6849 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6850 * 8, 12, 16, 32 or 512. When used by string operations
6851 * it can be up to a page.
6852 * @param iSegReg The index of the segment register to use for this
6853 * access. The base and limits are checked. Use UINT8_MAX
6854 * to indicate that no segmentation is required (for IDT,
6855 * GDT and LDT accesses).
6856 * @param GCPtrMem The address of the guest memory.
6857 * @param fAccess How the memory is being accessed. The
6858 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6859 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6860 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6861 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6862 * set.
6863 * @param uAlignCtl Alignment control:
6864 * - Bits 15:0 is the alignment mask.
6865 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6866 * IEM_MEMMAP_F_ALIGN_SSE, and
6867 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6868 * Pass zero to skip alignment.
6869 */
6870VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6871 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6872{
6873 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);
6874
6875 /*
6876 * Check the input and figure out which mapping entry to use.
6877 */
6878 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6879 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6880 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6881 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6882 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6883
6884 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6885 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6886 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6887 {
6888 iMemMap = iemMemMapFindFree(pVCpu);
6889 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6890 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6891 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6892 pVCpu->iem.s.aMemMappings[2].fAccess),
6893 VERR_IEM_IPE_9);
6894 }
6895
6896 /*
6897 * Map the memory, checking that we can actually access it. If something
6898 * slightly complicated happens, fall back on bounce buffering.
6899 */
6900 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6901 if (rcStrict == VINF_SUCCESS)
6902 { /* likely */ }
6903 else
6904 return rcStrict;
6905
6906 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6907 { /* likely */ }
6908 else
6909 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6910
6911 /*
6912 * Alignment check.
6913 */
6914 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6915 { /* likelyish */ }
6916 else
6917 {
6918 /* Misaligned access. */
6919 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6920 {
6921 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6922 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6923 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6924 {
6925 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6926
6927 if (!iemMemAreAlignmentChecksEnabled(pVCpu))
6928 { /* likely */ }
6929 else
6930 return iemRaiseAlignmentCheckException(pVCpu);
6931 }
6932 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6933 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6934 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6935 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6936 * that's what FXSAVE does on a 10980xe. */
6937 && iemMemAreAlignmentChecksEnabled(pVCpu))
6938 return iemRaiseAlignmentCheckException(pVCpu);
6939 else
6940 return iemRaiseGeneralProtectionFault0(pVCpu);
6941 }
6942
6943#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6944 /* If the access is atomic there are host platform alignment restrictions
6945 we need to conform with. */
6946 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6947# if defined(RT_ARCH_AMD64)
6948 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6949# elif defined(RT_ARCH_ARM64)
6950 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6951# else
6952# error port me
6953# endif
6954 )
6955 { /* okay */ }
6956 else
6957 {
6958 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6959 pVCpu->iem.s.cMisalignedAtomics += 1;
6960 return VINF_EM_EMULATE_SPLIT_LOCK;
6961 }
6962#endif
6963 }
6964
6965#ifdef IEM_WITH_DATA_TLB
6966 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6967
6968 /*
6969 * Get the TLB entry for this page and check PT flags.
6970 *
6971 * We reload the TLB entry if we need to set the dirty bit (accessed
6972 * should in theory always be set).
6973 */
6974 uint8_t *pbMem = NULL;
6975 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
6976 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
6977 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
6978 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
6979 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
6980 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
6981 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
6982 {
6983# ifdef IEM_WITH_TLB_STATISTICS
6984 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
6985# endif
6986
6987 /* If the page is either supervisor only or non-writable, we need to do
6988 more careful access checks. */
6989 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6990 {
6991 /* Write to read only memory? */
6992 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6993 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6994 && ( ( IEM_GET_CPL(pVCpu) == 3
6995 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6996 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6997 {
6998 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6999 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7000 }
7001
7002 /* Kernel memory accessed by userland? */
7003 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
7004 && IEM_GET_CPL(pVCpu) == 3
7005 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7006 {
7007 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7008 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7009 }
7010 }
7011
7012 /* Look up the physical page info if necessary. */
7013 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7014# ifdef IN_RING3
7015 pbMem = pTlbe->pbMappingR3;
7016# else
7017 pbMem = NULL;
7018# endif
7019 else
7020 {
7021 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
7022 { /* likely */ }
7023 else
7024 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
7025 pTlbe->pbMappingR3 = NULL;
7026 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7027 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7028 &pbMem, &pTlbe->fFlagsAndPhysRev);
7029 AssertRCReturn(rc, rc);
7030# ifdef IN_RING3
7031 pTlbe->pbMappingR3 = pbMem;
7032# endif
7033 }
7034 }
7035 else
7036 {
7037 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
7038
7039 /* This page table walking will set A bits as required by the access while performing the walk.
7040 ASSUMES these are set when the address is translated rather than on commit... */
7041 /** @todo testcase: check when A bits are actually set by the CPU for code. */
7042 PGMPTWALKFAST WalkFast;
7043 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7044 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7045 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7046 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7047 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7048 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7049 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7050 fQPage |= PGMQPAGE_F_USER_MODE;
7051 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7052 if (RT_SUCCESS(rc))
7053 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7054 else
7055 {
7056 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7057# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7058 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
7059 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7060# endif
7061 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7062 }
7063
7064 uint32_t fDataBps;
7065 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7066 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7067 {
7068 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7069 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7070 {
7071 pTlbe--;
7072 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7073 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7074 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7075# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
7076 else
7077 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
7078# endif
7079 }
7080 else
7081 {
7082 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7083 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7084 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7085 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7086# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
7087 else
7088 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
7089# endif
7090 }
7091 }
7092 else
7093 {
7094 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7095 to the page with the data access breakpoint armed on it to pass thru here. */
7096 if (fDataBps > 1)
7097 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7098 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7099 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7100 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7101 pTlbe->uTag = uTagNoRev;
7102 }
7103 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
7104 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
7105 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7106 pTlbe->GCPhys = GCPhysPg;
7107 pTlbe->pbMappingR3 = NULL;
7108 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
7109 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
7110 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
7111 || !(fAccess & IEM_ACCESS_TYPE_WRITE)
7112 || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);
7113 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
7114 || IEM_GET_CPL(pVCpu) != 3
7115 || (fAccess & IEM_ACCESS_WHAT_SYS));
7116
7117 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
7118 {
7119 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
7120 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
7121 else
7122 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
7123 }
7124
7125 /* Resolve the physical address. */
7126 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7127 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7128 &pbMem, &pTlbe->fFlagsAndPhysRev);
7129 AssertRCReturn(rc, rc);
7130# ifdef IN_RING3
7131 pTlbe->pbMappingR3 = pbMem;
7132# endif
7133 }
7134
7135 /*
7136 * Check the physical page level access and mapping.
7137 */
7138 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
7139 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
7140 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
7141 { /* probably likely */ }
7142 else
7143 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
7144 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7145 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7146 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7147 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7148 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7149
7150 if (pbMem)
7151 {
7152 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7153 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7154 fAccess |= IEM_ACCESS_NOT_LOCKED;
7155 }
7156 else
7157 {
7158 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7159 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7160 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7161 if (rcStrict != VINF_SUCCESS)
7162 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7163 }
7164
7165 void * const pvMem = pbMem;
7166
7167 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7168 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7169 if (fAccess & IEM_ACCESS_TYPE_READ)
7170 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7171
7172#else /* !IEM_WITH_DATA_TLB */
7173
7174 RTGCPHYS GCPhysFirst;
7175 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7176 if (rcStrict != VINF_SUCCESS)
7177 return rcStrict;
7178
7179 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7180 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7181 if (fAccess & IEM_ACCESS_TYPE_READ)
7182 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7183
7184 void *pvMem;
7185 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7186 if (rcStrict != VINF_SUCCESS)
7187 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7188
7189#endif /* !IEM_WITH_DATA_TLB */
7190
7191 /*
7192 * Fill in the mapping table entry.
7193 */
7194 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7195 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7196 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7197 pVCpu->iem.s.cActiveMappings += 1;
7198
7199 *ppvMem = pvMem;
7200 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7201 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
7202 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
7203
7204 return VINF_SUCCESS;
7205}
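/*
 * Illustrative usage sketch (informational comment only, not part of the build):
 * the usual map/modify/commit pattern for a simple data write, assuming
 * hypothetical locals GCPtrMem and uValue and a plain data-write access
 * (IEM_ACCESS_DATA_W).
 *
 *      void    *pvMem;
 *      uint8_t  bUnmapInfo;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem, &bUnmapInfo, sizeof(uint32_t), X86_SREG_DS,
 *                                        GCPtrMem, IEM_ACCESS_DATA_W, 0); // uAlignCtl = 0: no alignment checks
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                            // #GP/#SS/#PF/#AC already raised.
 *      *(uint32_t *)pvMem = uValue;                    // modify the mapped (or bounce buffered) bytes
 *      return iemMemCommitAndUnmap(pVCpu, bUnmapInfo); // write back (if bounced) and free the entry
 */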
7206
7207
7208/**
7209 * Commits the guest memory if bounce buffered and unmaps it.
7210 *
7211 * @returns Strict VBox status code.
7212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7213 * @param bUnmapInfo Unmap info set by iemMemMap.
7214 */
7215VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7216{
7217 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7218 AssertMsgReturn( (bUnmapInfo & 0x08)
7219 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7220 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
7221 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7222 VERR_NOT_FOUND);
7223
7224 /* If it's bounce buffered, we may need to write back the buffer. */
7225 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7226 {
7227 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7228 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7229 }
7230 /* Otherwise unlock it. */
7231 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7232 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7233
7234 /* Free the entry. */
7235 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7236 Assert(pVCpu->iem.s.cActiveMappings != 0);
7237 pVCpu->iem.s.cActiveMappings--;
7238 return VINF_SUCCESS;
7239}
7240
7241
7242/**
7243 * Rolls back the guest memory (conceptually only) and unmaps it.
7244 *
7245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7246 * @param bUnmapInfo Unmap info set by iemMemMap.
7247 */
7248void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7249{
7250 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7251 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7252 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7253 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7254 == ((unsigned)bUnmapInfo >> 4),
7255 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7256
7257 /* Unlock it if necessary. */
7258 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7259 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7260
7261 /* Free the entry. */
7262 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7263 Assert(pVCpu->iem.s.cActiveMappings != 0);
7264 pVCpu->iem.s.cActiveMappings--;
7265}
7266
7267#ifdef IEM_WITH_SETJMP
7268
7269/**
7270 * Maps the specified guest memory for the given kind of access, longjmp on
7271 * error.
7272 *
7273 * This may be using bounce buffering of the memory if it's crossing a page
7274 * boundary or if there is an access handler installed for any of it. Because
7275 * of lock prefix guarantees, we're in for some extra clutter when this
7276 * happens.
7277 *
7278 * This may raise a \#GP, \#SS, \#PF or \#AC.
7279 *
7280 * @returns Pointer to the mapped memory.
7281 *
7282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7283 * @param bUnmapInfo Where to return unmap info to be passed to
7284 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
7285 * iemMemCommitAndUnmapWoSafeJmp,
7286 * iemMemCommitAndUnmapRoSafeJmp,
7287 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
7288 * when done.
7289 * @param cbMem The number of bytes to map. This is usually 1,
7290 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7291 * string operations it can be up to a page.
7292 * @param iSegReg The index of the segment register to use for
7293 * this access. The base and limits are checked.
7294 * Use UINT8_MAX to indicate that no segmentation
7295 * is required (for IDT, GDT and LDT accesses).
7296 * @param GCPtrMem The address of the guest memory.
7297 * @param fAccess How the memory is being accessed. The
7298 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
7299 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
7300 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
7301 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
7302 * set.
7303 * @param uAlignCtl Alignment control:
7304 * - Bits 15:0 is the alignment mask.
7305 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
7306 * IEM_MEMMAP_F_ALIGN_SSE, and
7307 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
7308 * Pass zero to skip alignment.
 7309 * @tparam a_fSafeCall Whether this is a call from a "safe" fallback function in
7310 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
7311 * needs counting as such in the statistics.
7312 */
7313template<bool a_fSafeCall = false>
7314static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7315 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7316{
7317 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
7318
7319 /*
7320 * Check the input, check segment access and adjust address
7321 * with segment base.
7322 */
7323 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7324 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
7325 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7326
7327 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7328 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7329 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7330
7331 /*
7332 * Alignment check.
7333 */
7334 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
7335 { /* likelyish */ }
7336 else
7337 {
7338 /* Misaligned access. */
7339 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7340 {
7341 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
7342 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
7343 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
7344 {
7345 AssertCompile(X86_CR0_AM == X86_EFL_AC);
7346
7347 if (iemMemAreAlignmentChecksEnabled(pVCpu))
7348 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7349 }
7350 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
7351 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
7352 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
7353 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
7354 * that's what FXSAVE does on a 10980xe. */
7355 && iemMemAreAlignmentChecksEnabled(pVCpu))
7356 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7357 else
7358 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
7359 }
7360
7361#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
 7362 /* If the access is atomic there are host platform alignment restrictions
7363 we need to conform with. */
7364 if ( !(fAccess & IEM_ACCESS_ATOMIC)
7365# if defined(RT_ARCH_AMD64)
7366 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
7367# elif defined(RT_ARCH_ARM64)
7368 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
7369# else
7370# error port me
7371# endif
7372 )
7373 { /* okay */ }
7374 else
7375 {
7376 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
7377 pVCpu->iem.s.cMisalignedAtomics += 1;
7378 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
7379 }
7380#endif
7381 }
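    /* Worked example for the split-lock check in the block above (assuming the
       64 byte cache line of the AMD64 branch): GCPtrMem = 0x1003c with cbMem = 8
       gives 64 - 0x3c = 4, which is less than 8, so the access straddles a
       cache line and we take the VINF_EM_EMULATE_SPLIT_LOCK fallback; with
       cbMem = 4 it just fits (4 >= 4) and we carry on with the mapping. */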
7382
7383 /*
7384 * Figure out which mapping entry to use.
7385 */
7386 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
7387 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7388 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7389 {
7390 iMemMap = iemMemMapFindFree(pVCpu);
7391 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
7392 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
7393 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
7394 pVCpu->iem.s.aMemMappings[2].fAccess),
7395 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
7396 }
7397
7398 /*
7399 * Crossing a page boundary?
7400 */
7401 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
7402 { /* No (likely). */ }
7403 else
7404 {
7405 void *pvMem;
7406 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
7407 if (rcStrict == VINF_SUCCESS)
7408 return pvMem;
7409 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7410 }
7411
7412#ifdef IEM_WITH_DATA_TLB
7413 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
7414
7415 /*
7416 * Get the TLB entry for this page checking that it has the A & D bits
7417 * set as per fAccess flags.
7418 */
7419 /** @todo make the caller pass these in with fAccess. */
7420 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
7421 ? IEMTLBE_F_PT_NO_USER : 0;
7422 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
7423 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
7424 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
7425 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7426 ? IEMTLBE_F_PT_NO_WRITE : 0)
7427 : 0;
7428 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
7429 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
7430 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
7431 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
7432 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
7433 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
7434 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
7435 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
7436 {
7437# ifdef IEM_WITH_TLB_STATISTICS
7438 if (a_fSafeCall)
7439 pVCpu->iem.s.DataTlb.cTlbSafeHits++;
7440 else
7441 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
7442# endif
7443 }
7444 else
7445 {
7446 if (a_fSafeCall)
7447 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
7448 else
7449 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
7450
7451 /* This page table walking will set A and D bits as required by the
7452 access while performing the walk.
7453 ASSUMES these are set when the address is translated rather than on commit... */
7454 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7455 PGMPTWALKFAST WalkFast;
7456 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7457 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7458 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7459 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7460 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7461 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7462 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7463 fQPage |= PGMQPAGE_F_USER_MODE;
7464 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7465 if (RT_SUCCESS(rc))
7466 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7467 else
7468 {
7469 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7470# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7471 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
7472 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7473# endif
7474 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7475 }
7476
7477 uint32_t fDataBps;
7478 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7479 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7480 {
7481 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7482 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7483 {
7484 pTlbe--;
7485 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7486 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7487 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7488# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
7489 else
7490 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
7491# endif
7492 }
7493 else
7494 {
7495 if (a_fSafeCall)
7496 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
7497 else
7498 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7499 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7500 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7501 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7502# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
7503 else
7504 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
7505# endif
7506 }
7507 }
7508 else
7509 {
7510 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7511 to the page with the data access breakpoint armed on it to pass thru here. */
7512 if (fDataBps > 1)
7513 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7514 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7515 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7516 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7517 pTlbe->uTag = uTagNoRev;
7518 }
7519 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
7520 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
7521 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7522 pTlbe->GCPhys = GCPhysPg;
7523 pTlbe->pbMappingR3 = NULL;
7524 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7525 Assert( !(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
7526 || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);
7527 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
7528
7529 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
7530 {
7531 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
7532 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
7533 else
7534 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
7535 }
7536
7537 /* Resolve the physical address. */
7538 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7539 uint8_t *pbMemFullLoad = NULL;
7540 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7541 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
7542 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7543# ifdef IN_RING3
7544 pTlbe->pbMappingR3 = pbMemFullLoad;
7545# endif
7546 }
7547
7548 /*
7549 * Check the flags and physical revision.
7550 * Note! This will revalidate the uTlbPhysRev after a full load. This is
7551 * just to keep the code structure simple (i.e. avoid gotos or similar).
7552 */
7553 uint8_t *pbMem;
7554 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
7555 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7556# ifdef IN_RING3
7557 pbMem = pTlbe->pbMappingR3;
7558# else
7559 pbMem = NULL;
7560# endif
7561 else
7562 {
7563 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7564
7565 /*
7566 * Okay, something isn't quite right or needs refreshing.
7567 */
7568 /* Write to read only memory? */
7569 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
7570 {
7571 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7572# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7573/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
 7574 * to trigger an \#PF or a VM nested paging exit here yet! */
7575 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7576 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7577# endif
7578 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7579 }
7580
7581 /* Kernel memory accessed by userland? */
7582 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
7583 {
7584 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7585# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7586/** @todo TLB: See above. */
7587 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7588 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7589# endif
7590 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7591 }
7592
7593 /*
7594 * Check if the physical page info needs updating.
7595 */
7596 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7597# ifdef IN_RING3
7598 pbMem = pTlbe->pbMappingR3;
7599# else
7600 pbMem = NULL;
7601# endif
7602 else
7603 {
7604 pTlbe->pbMappingR3 = NULL;
7605 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7606 pbMem = NULL;
7607 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7608 &pbMem, &pTlbe->fFlagsAndPhysRev);
7609 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7610# ifdef IN_RING3
7611 pTlbe->pbMappingR3 = pbMem;
7612# endif
7613 }
7614
7615 /*
7616 * Check the physical page level access and mapping.
7617 */
7618 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
7619 { /* probably likely */ }
7620 else
7621 {
7622 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
7623 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7624 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7625 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7626 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7627 if (rcStrict == VINF_SUCCESS)
7628 return pbMem;
7629 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7630 }
7631 }
7632 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7633
7634 if (pbMem)
7635 {
7636 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7637 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7638 fAccess |= IEM_ACCESS_NOT_LOCKED;
7639 }
7640 else
7641 {
7642 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7643 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7644 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7645 if (rcStrict == VINF_SUCCESS)
7646 {
7647 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7648 return pbMem;
7649 }
7650 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7651 }
7652
7653 void * const pvMem = pbMem;
7654
7655 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7656 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7657 if (fAccess & IEM_ACCESS_TYPE_READ)
7658 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7659
7660#else /* !IEM_WITH_DATA_TLB */
7661
7662
7663 RTGCPHYS GCPhysFirst;
7664 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7665 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7666 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7667
7668 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7669 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7670 if (fAccess & IEM_ACCESS_TYPE_READ)
7671 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7672
7673 void *pvMem;
7674 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7675 if (rcStrict == VINF_SUCCESS)
7676 { /* likely */ }
7677 else
7678 {
7679 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7680 if (rcStrict == VINF_SUCCESS)
7681 return pvMem;
7682 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7683 }
7684
7685#endif /* !IEM_WITH_DATA_TLB */
7686
7687 /*
7688 * Fill in the mapping table entry.
7689 */
7690 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7691 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7692 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7693 pVCpu->iem.s.cActiveMappings++;
7694
7695 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7696 return pvMem;
7697}
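/*
 * Typical usage sketch for the setjmp mapping function above.  This mirrors
 * what the store helpers further down do (e.g. iemMemStoreDataU128AlignedSseJmp)
 * and is only meant to illustrate the map / modify / commit contract; the
 * variable values are made up:
 *
 *      uint8_t    bUnmapInfo;
 *      uint64_t  *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu64Dst), iSegReg, GCPtrMem,
 *                                                    IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
 *      *pu64Dst = uValue;
 *      iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);  // longjmps on commit failure
 */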
7698
7699
7700/** @see iemMemMapJmp */
7701static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7702 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7703{
7704 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
7705}
7706
7707
7708/**
7709 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
7710 *
7711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7712 * @param bUnmapInfo Unmap info set by iemMemMapJmp.
7714 */
7715void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7716{
7717 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7718 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7719 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7720 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7721 == ((unsigned)bUnmapInfo >> 4),
7722 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7723
7724 /* If it's bounce buffered, we may need to write back the buffer. */
7725 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7726 {
7727 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7728 {
7729 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7730 if (rcStrict == VINF_SUCCESS)
7731 return;
7732 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7733 }
7734 }
7735 /* Otherwise unlock it. */
7736 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7737 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7738
7739 /* Free the entry. */
7740 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7741 Assert(pVCpu->iem.s.cActiveMappings != 0);
7742 pVCpu->iem.s.cActiveMappings--;
7743}
7744
7745
7746/** Fallback for iemMemCommitAndUnmapRwJmp. */
7747void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7748{
7749 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7750 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7751}
7752
7753
7754/** Fallback for iemMemCommitAndUnmapAtJmp. */
7755void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7756{
7757 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7758 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7759}
7760
7761
7762/** Fallback for iemMemCommitAndUnmapWoJmp. */
7763void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7764{
7765 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7766 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7767}
7768
7769
7770/** Fallback for iemMemCommitAndUnmapRoJmp. */
7771void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7772{
7773 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7774 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7775}
7776
7777
7778/** Fallback for iemMemRollbackAndUnmapWo. */
7779void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7780{
7781 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7782 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7783}
7784
7785#endif /* IEM_WITH_SETJMP */
7786
7787#ifndef IN_RING3
7788/**
 7789 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 7790 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
7791 *
7792 * Allows the instruction to be completed and retired, while the IEM user will
7793 * return to ring-3 immediately afterwards and do the postponed writes there.
7794 *
7795 * @returns VBox status code (no strict statuses). Caller must check
7796 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7798 * @param bUnmapInfo Unmap info set by iemMemMap.
7800 */
7801VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7802{
7803 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7804 AssertMsgReturn( (bUnmapInfo & 0x08)
7805 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7806 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7807 == ((unsigned)bUnmapInfo >> 4),
7808 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7809 VERR_NOT_FOUND);
7810
7811 /* If it's bounce buffered, we may need to write back the buffer. */
7812 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7813 {
7814 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7815 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7816 }
7817 /* Otherwise unlock it. */
7818 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7819 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7820
7821 /* Free the entry. */
7822 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7823 Assert(pVCpu->iem.s.cActiveMappings != 0);
7824 pVCpu->iem.s.cActiveMappings--;
7825 return VINF_SUCCESS;
7826}
7827#endif
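/*
 * Caller-side sketch for the postponing variant above (illustrative only; the
 * surrounding code is hypothetical, the VMCPU_FF_IEM rule comes from the
 * function documentation):
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
 *      // The instruction may be retired even if part of the write was
 *      // postponed, but string instructions and the like must not be repeated
 *      // while VMCPU_FF_IEM is pending; return to ring-3 first so the
 *      // postponed writes get carried out there.
 */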
7828
7829
7830/**
 7831 * Rolls back mappings, releasing page locks and such.
7832 *
7833 * The caller shall only call this after checking cActiveMappings.
7834 *
7835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7836 */
7837void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7838{
7839 Assert(pVCpu->iem.s.cActiveMappings > 0);
7840
7841 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7842 while (iMemMap-- > 0)
7843 {
7844 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7845 if (fAccess != IEM_ACCESS_INVALID)
7846 {
7847 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7848 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7849 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7850 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7851 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7852 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7853 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7855 pVCpu->iem.s.cActiveMappings--;
7856 }
7857 }
7858}
7859
7860
7861/*
7862 * Instantiate R/W templates.
7863 */
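/* Each TMPL_MEM_TYPE / TMPL_MEM_FN_SUFF block below, together with the
   #include of IEMAllMemRWTmpl.cpp.h that follows it, expands into the fetch
   and store helpers for that operand width (e.g. iemMemFetchDataU16 and
   iemMemStoreDataU16, used further down in this file), plus stack helpers
   while TMPL_MEM_WITH_STACK is defined.  Orientation note only; see
   IEMAllMemRWTmpl.cpp.h for exactly what gets generated. */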
7864#define TMPL_MEM_WITH_STACK
7865
7866#define TMPL_MEM_TYPE uint8_t
7867#define TMPL_MEM_FN_SUFF U8
7868#define TMPL_MEM_FMT_TYPE "%#04x"
7869#define TMPL_MEM_FMT_DESC "byte"
7870#include "IEMAllMemRWTmpl.cpp.h"
7871
7872#define TMPL_MEM_TYPE uint16_t
7873#define TMPL_MEM_FN_SUFF U16
7874#define TMPL_MEM_FMT_TYPE "%#06x"
7875#define TMPL_MEM_FMT_DESC "word"
7876#include "IEMAllMemRWTmpl.cpp.h"
7877
7878#define TMPL_WITH_PUSH_SREG
7879#define TMPL_MEM_TYPE uint32_t
7880#define TMPL_MEM_FN_SUFF U32
7881#define TMPL_MEM_FMT_TYPE "%#010x"
7882#define TMPL_MEM_FMT_DESC "dword"
7883#include "IEMAllMemRWTmpl.cpp.h"
7884#undef TMPL_WITH_PUSH_SREG
7885
7886#define TMPL_MEM_TYPE uint64_t
7887#define TMPL_MEM_FN_SUFF U64
7888#define TMPL_MEM_FMT_TYPE "%#018RX64"
7889#define TMPL_MEM_FMT_DESC "qword"
7890#include "IEMAllMemRWTmpl.cpp.h"
7891
7892#undef TMPL_MEM_WITH_STACK
7893
7894#define TMPL_MEM_TYPE uint32_t
7895#define TMPL_MEM_TYPE_ALIGN 0
7896#define TMPL_MEM_FN_SUFF U32NoAc
7897#define TMPL_MEM_FMT_TYPE "%#010x"
7898#define TMPL_MEM_FMT_DESC "dword"
7899#include "IEMAllMemRWTmpl.cpp.h"
7900#undef TMPL_WITH_PUSH_SREG
7901
7902#define TMPL_MEM_TYPE uint64_t
7903#define TMPL_MEM_TYPE_ALIGN 0
7904#define TMPL_MEM_FN_SUFF U64NoAc
7905#define TMPL_MEM_FMT_TYPE "%#018RX64"
7906#define TMPL_MEM_FMT_DESC "qword"
7907#include "IEMAllMemRWTmpl.cpp.h"
7908
7909#define TMPL_MEM_TYPE uint64_t
7910#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7911#define TMPL_MEM_FN_SUFF U64AlignedU128
7912#define TMPL_MEM_FMT_TYPE "%#018RX64"
7913#define TMPL_MEM_FMT_DESC "qword"
7914#include "IEMAllMemRWTmpl.cpp.h"
7915
7916/* See IEMAllMemRWTmplInline.cpp.h */
7917#define TMPL_MEM_BY_REF
7918
7919#define TMPL_MEM_TYPE RTFLOAT80U
7920#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7921#define TMPL_MEM_FN_SUFF R80
7922#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7923#define TMPL_MEM_FMT_DESC "tword"
7924#include "IEMAllMemRWTmpl.cpp.h"
7925
7926#define TMPL_MEM_TYPE RTPBCD80U
7927#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7928#define TMPL_MEM_FN_SUFF D80
7929#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7930#define TMPL_MEM_FMT_DESC "tword"
7931#include "IEMAllMemRWTmpl.cpp.h"
7932
7933#define TMPL_MEM_TYPE RTUINT128U
7934#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7935#define TMPL_MEM_FN_SUFF U128
7936#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7937#define TMPL_MEM_FMT_DESC "dqword"
7938#include "IEMAllMemRWTmpl.cpp.h"
7939
7940#define TMPL_MEM_TYPE RTUINT128U
7941#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7942#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7943#define TMPL_MEM_FN_SUFF U128AlignedSse
7944#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7945#define TMPL_MEM_FMT_DESC "dqword"
7946#include "IEMAllMemRWTmpl.cpp.h"
7947
7948#define TMPL_MEM_TYPE RTUINT128U
7949#define TMPL_MEM_TYPE_ALIGN 0
7950#define TMPL_MEM_FN_SUFF U128NoAc
7951#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7952#define TMPL_MEM_FMT_DESC "dqword"
7953#include "IEMAllMemRWTmpl.cpp.h"
7954
7955#define TMPL_MEM_TYPE RTUINT256U
7956#define TMPL_MEM_TYPE_ALIGN 0
7957#define TMPL_MEM_FN_SUFF U256NoAc
7958#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7959#define TMPL_MEM_FMT_DESC "qqword"
7960#include "IEMAllMemRWTmpl.cpp.h"
7961
7962#define TMPL_MEM_TYPE RTUINT256U
7963#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7964#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7965#define TMPL_MEM_FN_SUFF U256AlignedAvx
7966#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7967#define TMPL_MEM_FMT_DESC "qqword"
7968#include "IEMAllMemRWTmpl.cpp.h"
7969
7970/**
7971 * Fetches a data dword and zero extends it to a qword.
7972 *
7973 * @returns Strict VBox status code.
7974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7975 * @param pu64Dst Where to return the qword.
7976 * @param iSegReg The index of the segment register to use for
7977 * this access. The base and limits are checked.
7978 * @param GCPtrMem The address of the guest memory.
7979 */
7980VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7981{
7982 /* The lazy approach for now... */
7983 uint8_t bUnmapInfo;
7984 uint32_t const *pu32Src;
7985 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7986 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7987 if (rc == VINF_SUCCESS)
7988 {
7989 *pu64Dst = *pu32Src;
7990 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7991 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7992 }
7993 return rc;
7994}
7995
7996
7997#ifdef SOME_UNUSED_FUNCTION
7998/**
7999 * Fetches a data dword and sign extends it to a qword.
8000 *
8001 * @returns Strict VBox status code.
8002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8003 * @param pu64Dst Where to return the sign extended value.
8004 * @param iSegReg The index of the segment register to use for
8005 * this access. The base and limits are checked.
8006 * @param GCPtrMem The address of the guest memory.
8007 */
8008VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8009{
8010 /* The lazy approach for now... */
8011 uint8_t bUnmapInfo;
8012 int32_t const *pi32Src;
8013 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
8014 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
8015 if (rc == VINF_SUCCESS)
8016 {
8017 *pu64Dst = *pi32Src;
8018 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8019 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
8020 }
8021#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8022 else
8023 *pu64Dst = 0;
8024#endif
8025 return rc;
8026}
8027#endif
8028
8029
8030/**
8031 * Fetches a descriptor register (lgdt, lidt).
8032 *
8033 * @returns Strict VBox status code.
8034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8035 * @param pcbLimit Where to return the limit.
8036 * @param pGCPtrBase Where to return the base.
8037 * @param iSegReg The index of the segment register to use for
8038 * this access. The base and limits are checked.
8039 * @param GCPtrMem The address of the guest memory.
8040 * @param enmOpSize The effective operand size.
8041 */
8042VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
8043 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
8044{
8045 /*
8046 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
8047 * little special:
8048 * - The two reads are done separately.
 8049 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
8050 * - We suspect the 386 to actually commit the limit before the base in
8051 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
 8052 * don't try to emulate this eccentric behavior, because it's not well
8053 * enough understood and rather hard to trigger.
8054 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
8055 */
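    /* Operand layout, as can be read off the fetches below: a 16-bit limit at
       offset 0, followed at offset 2 by the base (24 bits used with a 16-bit
       operand size, 32 bits with a 32-bit operand size, 64 bits in 64-bit code). */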
8056 VBOXSTRICTRC rcStrict;
8057 if (IEM_IS_64BIT_CODE(pVCpu))
8058 {
8059 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
8060 if (rcStrict == VINF_SUCCESS)
8061 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
8062 }
8063 else
8064 {
8065 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
8066 if (enmOpSize == IEMMODE_32BIT)
8067 {
8068 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
8069 {
8070 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
8071 if (rcStrict == VINF_SUCCESS)
8072 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
8073 }
8074 else
8075 {
8076 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
8077 if (rcStrict == VINF_SUCCESS)
8078 {
8079 *pcbLimit = (uint16_t)uTmp;
8080 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
8081 }
8082 }
8083 if (rcStrict == VINF_SUCCESS)
8084 *pGCPtrBase = uTmp;
8085 }
8086 else
8087 {
8088 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
8089 if (rcStrict == VINF_SUCCESS)
8090 {
8091 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
8092 if (rcStrict == VINF_SUCCESS)
8093 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
8094 }
8095 }
8096 }
8097 return rcStrict;
8098}
8099
8100
8101/**
8102 * Stores a data dqword, SSE aligned.
8103 *
8104 * @returns Strict VBox status code.
8105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8106 * @param iSegReg The index of the segment register to use for
8107 * this access. The base and limits are checked.
8108 * @param GCPtrMem The address of the guest memory.
8109 * @param u128Value The value to store.
8110 */
8111VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
8112{
8113 /* The lazy approach for now... */
8114 uint8_t bUnmapInfo;
8115 PRTUINT128U pu128Dst;
8116 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
8117 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
8118 if (rc == VINF_SUCCESS)
8119 {
8120 pu128Dst->au64[0] = u128Value.au64[0];
8121 pu128Dst->au64[1] = u128Value.au64[1];
8122 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8123 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
8124 }
8125 return rc;
8126}
8127
8128
8129#ifdef IEM_WITH_SETJMP
8130/**
8131 * Stores a data dqword, SSE aligned.
8132 *
8133 * @returns Strict VBox status code.
8134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8135 * @param iSegReg The index of the segment register to use for
8136 * this access. The base and limits are checked.
8137 * @param GCPtrMem The address of the guest memory.
8138 * @param u128Value The value to store.
8139 */
8140void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
8141 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
8142{
8143 /* The lazy approach for now... */
8144 uint8_t bUnmapInfo;
8145 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
8146 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
8147 pu128Dst->au64[0] = u128Value.au64[0];
8148 pu128Dst->au64[1] = u128Value.au64[1];
8149 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
8150 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
8151}
8152#endif
8153
8154
8155/**
 8156 * Stores a data qqword.
8157 *
8158 * @returns Strict VBox status code.
8159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8160 * @param iSegReg The index of the segment register to use for
8161 * this access. The base and limits are checked.
8162 * @param GCPtrMem The address of the guest memory.
8163 * @param pu256Value Pointer to the value to store.
8164 */
8165VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
8166{
8167 /* The lazy approach for now... */
8168 uint8_t bUnmapInfo;
8169 PRTUINT256U pu256Dst;
8170 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8171 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
8172 if (rc == VINF_SUCCESS)
8173 {
8174 pu256Dst->au64[0] = pu256Value->au64[0];
8175 pu256Dst->au64[1] = pu256Value->au64[1];
8176 pu256Dst->au64[2] = pu256Value->au64[2];
8177 pu256Dst->au64[3] = pu256Value->au64[3];
8178 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8179 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8180 }
8181 return rc;
8182}
8183
8184
8185#ifdef IEM_WITH_SETJMP
8186/**
 8187 * Stores a data qqword, longjmp on error.
8188 *
8189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8190 * @param iSegReg The index of the segment register to use for
8191 * this access. The base and limits are checked.
8192 * @param GCPtrMem The address of the guest memory.
8193 * @param pu256Value Pointer to the value to store.
8194 */
8195void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8196{
8197 /* The lazy approach for now... */
8198 uint8_t bUnmapInfo;
8199 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8200 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
8201 pu256Dst->au64[0] = pu256Value->au64[0];
8202 pu256Dst->au64[1] = pu256Value->au64[1];
8203 pu256Dst->au64[2] = pu256Value->au64[2];
8204 pu256Dst->au64[3] = pu256Value->au64[3];
8205 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
8206 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8207}
8208#endif
8209
8210
8211/**
8212 * Stores a descriptor register (sgdt, sidt).
8213 *
8214 * @returns Strict VBox status code.
8215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8216 * @param cbLimit The limit.
8217 * @param GCPtrBase The base address.
8218 * @param iSegReg The index of the segment register to use for
8219 * this access. The base and limits are checked.
8220 * @param GCPtrMem The address of the guest memory.
8221 */
8222VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8223{
8224 /*
 8225 * The SIDT and SGDT instructions actually store the data using two
 8226 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
 8227 * do not respond to opsize prefixes.
8228 */
8229 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8230 if (rcStrict == VINF_SUCCESS)
8231 {
8232 if (IEM_IS_16BIT_CODE(pVCpu))
8233 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8234 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8235 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8236 else if (IEM_IS_32BIT_CODE(pVCpu))
8237 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8238 else
8239 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8240 }
8241 return rcStrict;
8242}
8243
8244
8245/**
8246 * Begin a special stack push (used by interrupt, exceptions and such).
8247 *
8248 * This will raise \#SS or \#PF if appropriate.
8249 *
8250 * @returns Strict VBox status code.
8251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8252 * @param cbMem The number of bytes to push onto the stack.
8253 * @param cbAlign The alignment mask (7, 3, 1).
8254 * @param ppvMem Where to return the pointer to the stack memory.
8255 * As with the other memory functions this could be
8256 * direct access or bounce buffered access, so
 8257 * don't commit the register state until the commit call
8258 * succeeds.
8259 * @param pbUnmapInfo Where to store unmap info for
8260 * iemMemStackPushCommitSpecial.
8261 * @param puNewRsp Where to return the new RSP value. This must be
8262 * passed unchanged to
8263 * iemMemStackPushCommitSpecial().
8264 */
8265VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8266 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
8267{
8268 Assert(cbMem < UINT8_MAX);
8269 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8270 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
8271}
8272
8273
8274/**
8275 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8276 *
8277 * This will update the rSP.
8278 *
8279 * @returns Strict VBox status code.
8280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8281 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
8282 * @param uNewRsp The new RSP value returned by
8283 * iemMemStackPushBeginSpecial().
8284 */
8285VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
8286{
8287 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8288 if (rcStrict == VINF_SUCCESS)
8289 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8290 return rcStrict;
8291}
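/*
 * Illustrative usage of the special push pair above (sketch only; the value
 * pushed and the variable names are made up, the call sequence is the
 * documented contract):
 *
 *      void        *pvStack;
 *      uint8_t      bUnmapInfo;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7 /*cbAlign*/, &pvStack, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *(uint64_t *)pvStack = uValueToPush;
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); // commits RSP on success
 *      }
 */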
8292
8293
8294/**
8295 * Begin a special stack pop (used by iret, retf and such).
8296 *
8297 * This will raise \#SS or \#PF if appropriate.
8298 *
8299 * @returns Strict VBox status code.
8300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8301 * @param cbMem The number of bytes to pop from the stack.
8302 * @param cbAlign The alignment mask (7, 3, 1).
8303 * @param ppvMem Where to return the pointer to the stack memory.
8304 * @param pbUnmapInfo Where to store unmap info for
8305 * iemMemStackPopDoneSpecial.
8306 * @param puNewRsp Where to return the new RSP value. This must be
8307 * assigned to CPUMCTX::rsp manually some time
8308 * after iemMemStackPopDoneSpecial() has been
8309 * called.
8310 */
8311VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8312 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
8313{
8314 Assert(cbMem < UINT8_MAX);
8315 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8316 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8317}
8318
8319
8320/**
8321 * Continue a special stack pop (used by iret and retf), for the purpose of
8322 * retrieving a new stack pointer.
8323 *
8324 * This will raise \#SS or \#PF if appropriate.
8325 *
8326 * @returns Strict VBox status code.
8327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8328 * @param off Offset from the top of the stack. This is zero
8329 * except in the retf case.
8330 * @param cbMem The number of bytes to pop from the stack.
8331 * @param ppvMem Where to return the pointer to the stack memory.
8332 * @param pbUnmapInfo Where to store unmap info for
8333 * iemMemStackPopDoneSpecial.
8334 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8335 * return this because all use of this function is
8336 * to retrieve a new value and anything we return
8337 * here would be discarded.)
8338 */
8339VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8340 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
8341{
8342 Assert(cbMem < UINT8_MAX);
8343
 8344 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8345 RTGCPTR GCPtrTop;
8346 if (IEM_IS_64BIT_CODE(pVCpu))
8347 GCPtrTop = uCurNewRsp;
8348 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8349 GCPtrTop = (uint32_t)uCurNewRsp;
8350 else
8351 GCPtrTop = (uint16_t)uCurNewRsp;
8352
8353 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8354 0 /* checked in iemMemStackPopBeginSpecial */);
8355}
8356
8357
8358/**
8359 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8360 * iemMemStackPopContinueSpecial).
8361 *
8362 * The caller will manually commit the rSP.
8363 *
8364 * @returns Strict VBox status code.
8365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8366 * @param bUnmapInfo Unmap information returned by
8367 * iemMemStackPopBeginSpecial() or
8368 * iemMemStackPopContinueSpecial().
8369 */
8370VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
8371{
8372 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8373}
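/*
 * Illustrative usage of the special pop helpers above (sketch only; variable
 * names are made up, the sequence follows the documented contract):
 *
 *      void const  *pvStack;
 *      uint8_t      bUnmapInfo;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7 /*cbAlign*/, &pvStack, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint64_t const uPopped = *(uint64_t const *)pvStack;
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp; // the caller commits rSP manually
 *          // ... use uPopped ...
 *      }
 */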
8374
8375
8376/**
8377 * Fetches a system table byte.
8378 *
8379 * @returns Strict VBox status code.
8380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8381 * @param pbDst Where to return the byte.
8382 * @param iSegReg The index of the segment register to use for
8383 * this access. The base and limits are checked.
8384 * @param GCPtrMem The address of the guest memory.
8385 */
8386VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8387{
8388 /* The lazy approach for now... */
8389 uint8_t bUnmapInfo;
8390 uint8_t const *pbSrc;
8391 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8392 if (rc == VINF_SUCCESS)
8393 {
8394 *pbDst = *pbSrc;
8395 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8396 }
8397 return rc;
8398}
8399
8400
8401/**
8402 * Fetches a system table word.
8403 *
8404 * @returns Strict VBox status code.
8405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8406 * @param pu16Dst Where to return the word.
8407 * @param iSegReg The index of the segment register to use for
8408 * this access. The base and limits are checked.
8409 * @param GCPtrMem The address of the guest memory.
8410 */
8411VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8412{
8413 /* The lazy approach for now... */
8414 uint8_t bUnmapInfo;
8415 uint16_t const *pu16Src;
8416 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8417 if (rc == VINF_SUCCESS)
8418 {
8419 *pu16Dst = *pu16Src;
8420 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8421 }
8422 return rc;
8423}
8424
8425
8426/**
8427 * Fetches a system table dword.
8428 *
8429 * @returns Strict VBox status code.
8430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8431 * @param pu32Dst Where to return the dword.
8432 * @param iSegReg The index of the segment register to use for
8433 * this access. The base and limits are checked.
8434 * @param GCPtrMem The address of the guest memory.
8435 */
8436VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8437{
8438 /* The lazy approach for now... */
8439 uint8_t bUnmapInfo;
8440 uint32_t const *pu32Src;
8441 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8442 if (rc == VINF_SUCCESS)
8443 {
8444 *pu32Dst = *pu32Src;
8445 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8446 }
8447 return rc;
8448}
8449
8450
8451/**
8452 * Fetches a system table qword.
8453 *
8454 * @returns Strict VBox status code.
8455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8456 * @param pu64Dst Where to return the qword.
8457 * @param iSegReg The index of the segment register to use for
8458 * this access. The base and limits are checked.
8459 * @param GCPtrMem The address of the guest memory.
8460 */
8461VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8462{
8463 /* The lazy approach for now... */
8464 uint8_t bUnmapInfo;
8465 uint64_t const *pu64Src;
8466 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8467 if (rc == VINF_SUCCESS)
8468 {
8469 *pu64Dst = *pu64Src;
8470 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8471 }
8472 return rc;
8473}
8474
8475
8476/**
8477 * Fetches a descriptor table entry with caller specified error code.
8478 *
8479 * @returns Strict VBox status code.
8480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8481 * @param pDesc Where to return the descriptor table entry.
8482 * @param uSel The selector which table entry to fetch.
8483 * @param uXcpt The exception to raise on table lookup error.
8484 * @param uErrorCode The error code associated with the exception.
8485 */
8486static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8487 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8488{
8489 AssertPtr(pDesc);
8490 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8491
8492 /** @todo did the 286 require all 8 bytes to be accessible? */
8493 /*
8494 * Get the selector table base and check bounds.
8495 */
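    /* Selector layout reminder (standard x86): bits 1:0 = RPL, bit 2 = TI
       (0 = GDT, 1 = LDT, i.e. X86_SEL_LDT), bits 15:3 = table index, so
       'uSel & X86_SEL_MASK' below gives the byte offset into the table. */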
8496 RTGCPTR GCPtrBase;
8497 if (uSel & X86_SEL_LDT)
8498 {
8499 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8500 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8501 {
8502 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8503 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8504 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8505 uErrorCode, 0);
8506 }
8507
8508 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8509 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8510 }
8511 else
8512 {
8513 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8514 {
8515 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8516 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8517 uErrorCode, 0);
8518 }
8519 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8520 }
8521
8522 /*
8523 * Read the legacy descriptor and maybe the long mode extensions if
8524 * required.
8525 */
8526 VBOXSTRICTRC rcStrict;
8527 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8528 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8529 else
8530 {
8531 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8532 if (rcStrict == VINF_SUCCESS)
8533 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8534 if (rcStrict == VINF_SUCCESS)
8535 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8536 if (rcStrict == VINF_SUCCESS)
8537 pDesc->Legacy.au16[3] = 0;
8538 else
8539 return rcStrict;
8540 }
8541
8542 if (rcStrict == VINF_SUCCESS)
8543 {
8544 if ( !IEM_IS_LONG_MODE(pVCpu)
8545 || pDesc->Legacy.Gen.u1DescType)
8546 pDesc->Long.au64[1] = 0;
8547 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8548 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8549 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8550 else
8551 {
8552 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8553 /** @todo is this the right exception? */
8554 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8555 }
8556 }
8557 return rcStrict;
8558}
8559
8560
8561/**
8562 * Fetches a descriptor table entry.
8563 *
8564 * @returns Strict VBox status code.
8565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8566 * @param pDesc Where to return the descriptor table entry.
8567 * @param uSel The selector which table entry to fetch.
8568 * @param uXcpt The exception to raise on table lookup error.
8569 */
8570VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8571{
8572 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8573}
8574
8575
8576/**
8577 * Marks the selector descriptor as accessed (only non-system descriptors).
8578 *
 8579 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8580 * will therefore skip the limit checks.
8581 *
8582 * @returns Strict VBox status code.
8583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8584 * @param uSel The selector.
8585 */
8586VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8587{
8588 /*
8589 * Get the selector table base and calculate the entry address.
8590 */
8591 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8592 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8593 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8594 GCPtr += uSel & X86_SEL_MASK;
8595
8596 /*
8597 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8598 * ugly stuff to avoid this. This will make sure it's an atomic access
 8599 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8600 */
8601 VBOXSTRICTRC rcStrict;
8602 uint8_t bUnmapInfo;
8603 uint32_t volatile *pu32;
8604 if ((GCPtr & 3) == 0)
8605 {
 8606 /* The normal case: map the 32 bits around the accessed bit (bit 40). */
8607 GCPtr += 2 + 2;
8608 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8609 if (rcStrict != VINF_SUCCESS)
8610 return rcStrict;
 8611 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8612 }
8613 else
8614 {
8615 /* The misaligned GDT/LDT case, map the whole thing. */
8616 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8617 if (rcStrict != VINF_SUCCESS)
8618 return rcStrict;
8619 switch ((uintptr_t)pu32 & 3)
8620 {
8621 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8622 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8623 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8624 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8625 }
8626 }
8627
8628 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8629}
8630
8631
8632#undef LOG_GROUP
8633#define LOG_GROUP LOG_GROUP_IEM
8634
8635/** @} */
8636
8637/** @name Opcode Helpers.
8638 * @{
8639 */
8640
8641/**
8642 * Calculates the effective address of a ModR/M memory operand.
8643 *
8644 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8645 *
8646 * @return Strict VBox status code.
8647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8648 * @param bRm The ModRM byte.
8649 * @param cbImmAndRspOffset - First byte: The size of any immediate
8650 * following the effective address opcode bytes
8651 * (only for RIP relative addressing).
8652 * - Second byte: RSP displacement (for POP [ESP]).
8653 * @param pGCPtrEff Where to return the effective address.
8654 */
8655VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8656{
8657 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8658# define SET_SS_DEF() \
8659 do \
8660 { \
8661 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8662 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8663 } while (0)
8664
8665 if (!IEM_IS_64BIT_CODE(pVCpu))
8666 {
8667/** @todo Check the effective address size crap! */
8668 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8669 {
8670 uint16_t u16EffAddr;
8671
8672 /* Handle the disp16 form with no registers first. */
8673 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8674 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8675 else
8676 {
 8677 /* Get the displacement. */
8678 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8679 {
8680 case 0: u16EffAddr = 0; break;
8681 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8682 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8683 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8684 }
8685
8686 /* Add the base and index registers to the disp. */
8687 switch (bRm & X86_MODRM_RM_MASK)
8688 {
8689 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8690 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8691 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8692 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8693 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8694 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8695 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8696 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8697 }
8698 }
8699
8700 *pGCPtrEff = u16EffAddr;
8701 }
8702 else
8703 {
8704 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8705 uint32_t u32EffAddr;
8706
8707 /* Handle the disp32 form with no registers first. */
8708 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8709 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8710 else
8711 {
8712 /* Get the register (or SIB) value. */
8713 switch ((bRm & X86_MODRM_RM_MASK))
8714 {
8715 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8716 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8717 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8718 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8719 case 4: /* SIB */
8720 {
8721 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8722
8723 /* Get the index and scale it. */
8724 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8725 {
8726 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8727 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8728 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8729 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8730 case 4: u32EffAddr = 0; /*none */ break;
8731 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8732 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8733 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8734 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8735 }
8736 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8737
8738 /* add base */
8739 switch (bSib & X86_SIB_BASE_MASK)
8740 {
8741 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8742 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8743 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8744 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8745 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8746 case 5:
8747 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8748 {
8749 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8750 SET_SS_DEF();
8751 }
8752 else
8753 {
8754 uint32_t u32Disp;
8755 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8756 u32EffAddr += u32Disp;
8757 }
8758 break;
8759 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8760 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8761 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8762 }
8763 break;
8764 }
8765 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8766 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8767 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8768 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8769 }
8770
8771 /* Get and add the displacement. */
8772 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8773 {
8774 case 0:
8775 break;
8776 case 1:
8777 {
8778 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8779 u32EffAddr += i8Disp;
8780 break;
8781 }
8782 case 2:
8783 {
8784 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8785 u32EffAddr += u32Disp;
8786 break;
8787 }
8788 default:
8789 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8790 }
8791
8792 }
8793 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8794 *pGCPtrEff = u32EffAddr;
8795 }
8796 }
8797 else
8798 {
8799 uint64_t u64EffAddr;
8800
8801 /* Handle the rip+disp32 form with no registers first. */
8802 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8803 {
8804 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
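                /* Note: RIP relative addressing is relative to the start of the *next*
                   instruction.  Only the bytes up to and including the disp32 have been
                   fetched at this point, so the low byte of cbImmAndRspOffset supplies
                   the size of any immediate that still follows (see the parameter
                   description above). */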
8805 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8806 }
8807 else
8808 {
8809 /* Get the register (or SIB) value. */
8810 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8811 {
8812 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8813 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8814 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8815 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8816 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8817 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8818 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8819 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8820 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8821 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8822 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8823 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8824 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8825 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8826 /* SIB */
8827 case 4:
8828 case 12:
8829 {
8830 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8831
8832 /* Get the index and scale it. */
8833 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8834 {
8835 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8836 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8837 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8838 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8839 case 4: u64EffAddr = 0; /*none */ break;
8840 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8841 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8842 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8843 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8844 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8845 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8846 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8847 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8848 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8849 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8850 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8851 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8852 }
8853 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8854
8855 /* add base */
8856 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8857 {
8858 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8859 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8860 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8861 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8862 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8863 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8864 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8865 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8866 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8867 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8868 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8869 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8870 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8871 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8872 /* complicated encodings */
8873 case 5:
8874 case 13:
8875 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8876 {
8877 if (!pVCpu->iem.s.uRexB)
8878 {
8879 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8880 SET_SS_DEF();
8881 }
8882 else
8883 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8884 }
8885 else
8886 {
8887 uint32_t u32Disp;
8888 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8889 u64EffAddr += (int32_t)u32Disp;
8890 }
8891 break;
8892 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8893 }
8894 break;
8895 }
8896 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8897 }
8898
8899 /* Get and add the displacement. */
8900 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8901 {
8902 case 0:
8903 break;
8904 case 1:
8905 {
8906 int8_t i8Disp;
8907 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8908 u64EffAddr += i8Disp;
8909 break;
8910 }
8911 case 2:
8912 {
8913 uint32_t u32Disp;
8914 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8915 u64EffAddr += (int32_t)u32Disp;
8916 break;
8917 }
8918 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8919 }
8920
8921 }
8922
8923 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8924 *pGCPtrEff = u64EffAddr;
8925 else
8926 {
8927 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8928 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8929 }
8930 }
8931
8932 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8933 return VINF_SUCCESS;
8934}
8935
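/*
 * Usage sketch (illustrative only, not taken from a real caller): how the
 * cbImmAndRspOffset argument of iemOpHlpCalcRmEffAddr above is packed.  The
 * low byte carries the size of a trailing immediate (only relevant for RIP
 * relative addressing) and the second byte an extra RSP/ESP displacement (the
 * POP [ESP] case).  pVCpu and bRm are assumed to come from the surrounding
 * decoder context; the values are made up for the example.
 *
 * @code
 *      RTGCPTR        GCPtrEff;
 *      uint32_t const cbImm  = 4;      // an imm32 follows the addressing bytes
 *      uint32_t const offRsp = 0;      // no POP [ESP] adjustment in this example
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, cbImm | (offRsp << 8), &GCPtrEff);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 * @endcode
 */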
8936
8937#ifdef IEM_WITH_SETJMP
8938/**
8939 * Calculates the effective address of a ModR/M memory operand.
8940 *
8941 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8942 *
8943 * May longjmp on internal error.
8944 *
8945 * @return The effective address.
8946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8947 * @param bRm The ModRM byte.
8948 * @param cbImmAndRspOffset - First byte: The size of any immediate
8949 * following the effective address opcode bytes
8950 * (only for RIP relative addressing).
8951 * - Second byte: RSP displacement (for POP [ESP]).
8952 */
8953RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8954{
8955 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8956# define SET_SS_DEF() \
8957 do \
8958 { \
8959 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8960 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8961 } while (0)
8962
8963 if (!IEM_IS_64BIT_CODE(pVCpu))
8964 {
8965/** @todo Check the effective address size crap! */
8966 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8967 {
8968 uint16_t u16EffAddr;
8969
8970 /* Handle the disp16 form with no registers first. */
8971 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8972 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8973 else
8974 {
8975 /* Get the displacement. */
8976 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8977 {
8978 case 0: u16EffAddr = 0; break;
8979 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8980 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8981 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8982 }
8983
8984 /* Add the base and index registers to the disp. */
8985 switch (bRm & X86_MODRM_RM_MASK)
8986 {
8987 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8988 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8989 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8990 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8991 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8992 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8993 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8994 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8995 }
8996 }
8997
8998 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8999 return u16EffAddr;
9000 }
9001
9002 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9003 uint32_t u32EffAddr;
9004
9005 /* Handle the disp32 form with no registers first. */
9006 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9007 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9008 else
9009 {
9010 /* Get the register (or SIB) value. */
9011 switch ((bRm & X86_MODRM_RM_MASK))
9012 {
9013 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9014 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9015 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9016 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9017 case 4: /* SIB */
9018 {
9019 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9020
9021 /* Get the index and scale it. */
9022 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9023 {
9024 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9025 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9026 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9027 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9028 case 4: u32EffAddr = 0; /*none */ break;
9029 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9030 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9031 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9032 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9033 }
9034 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9035
9036 /* add base */
9037 switch (bSib & X86_SIB_BASE_MASK)
9038 {
9039 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9040 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9041 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9042 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9043 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9044 case 5:
9045 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9046 {
9047 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9048 SET_SS_DEF();
9049 }
9050 else
9051 {
9052 uint32_t u32Disp;
9053 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9054 u32EffAddr += u32Disp;
9055 }
9056 break;
9057 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9058 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9059 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9060 }
9061 break;
9062 }
9063 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9064 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9065 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9066 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9067 }
9068
9069 /* Get and add the displacement. */
9070 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9071 {
9072 case 0:
9073 break;
9074 case 1:
9075 {
9076 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9077 u32EffAddr += i8Disp;
9078 break;
9079 }
9080 case 2:
9081 {
9082 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9083 u32EffAddr += u32Disp;
9084 break;
9085 }
9086 default:
9087 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9088 }
9089 }
9090
9091 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9092 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9093 return u32EffAddr;
9094 }
9095
9096 uint64_t u64EffAddr;
9097
9098 /* Handle the rip+disp32 form with no registers first. */
9099 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9100 {
9101 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9102 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9103 }
9104 else
9105 {
9106 /* Get the register (or SIB) value. */
9107 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9108 {
9109 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9110 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9111 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9112 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9113 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9114 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9115 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9116 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9117 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9118 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9119 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9120 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9121 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9122 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9123 /* SIB */
9124 case 4:
9125 case 12:
9126 {
9127 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9128
9129 /* Get the index and scale it. */
9130 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9131 {
9132 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9133 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9134 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9135 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9136 case 4: u64EffAddr = 0; /*none */ break;
9137 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9138 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9139 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9140 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9141 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9142 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9143 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9144 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9145 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9146 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9147 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9148 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9149 }
9150 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9151
9152 /* add base */
9153 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9154 {
9155 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9156 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9157 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9158 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9159 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9160 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9161 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9162 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9163 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9164 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9165 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9166 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9167 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9168 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9169 /* complicated encodings */
9170 case 5:
9171 case 13:
9172 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9173 {
9174 if (!pVCpu->iem.s.uRexB)
9175 {
9176 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9177 SET_SS_DEF();
9178 }
9179 else
9180 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9181 }
9182 else
9183 {
9184 uint32_t u32Disp;
9185 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9186 u64EffAddr += (int32_t)u32Disp;
9187 }
9188 break;
9189 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9190 }
9191 break;
9192 }
9193 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9194 }
9195
9196 /* Get and add the displacement. */
9197 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9198 {
9199 case 0:
9200 break;
9201 case 1:
9202 {
9203 int8_t i8Disp;
9204 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9205 u64EffAddr += i8Disp;
9206 break;
9207 }
9208 case 2:
9209 {
9210 uint32_t u32Disp;
9211 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9212 u64EffAddr += (int32_t)u32Disp;
9213 break;
9214 }
9215 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9216 }
9217
9218 }
9219
9220 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9221 {
9222 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9223 return u64EffAddr;
9224 }
9225 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9226 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9227 return u64EffAddr & UINT32_MAX;
9228}
9229#endif /* IEM_WITH_SETJMP */
9230
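/*
 * Usage sketch (illustrative only): the Jmp variant above returns the address
 * directly and reports decode problems by longjmp'ing, so the caller is
 * assumed to already sit inside an IEM_TRY_SETJMP frame and needs no status
 * code plumbing at the call site.  pVCpu and bRm are assumed to come from the
 * surrounding decoder context.
 *
 * @code
 *      RTGCPTR const GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0 /*cbImmAndRspOffset*/);
 *      // ... use GCPtrEff; decode errors never reach this point ...
 * @endcode
 */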
9231
9232/**
9233 * Calculates the effective address of a ModR/M memory operand, extended version
9234 * for use in the recompilers.
9235 *
9236 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9237 *
9238 * @return Strict VBox status code.
9239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9240 * @param bRm The ModRM byte.
9241 * @param cbImmAndRspOffset - First byte: The size of any immediate
9242 * following the effective address opcode bytes
9243 * (only for RIP relative addressing).
9244 * - Second byte: RSP displacement (for POP [ESP]).
9245 * @param pGCPtrEff Where to return the effective address.
9246 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9247 * SIB byte (bits 39:32).
9248 */
9249VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9250{
9251 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
9252# define SET_SS_DEF() \
9253 do \
9254 { \
9255 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9256 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9257 } while (0)
9258
9259 uint64_t uInfo;
9260 if (!IEM_IS_64BIT_CODE(pVCpu))
9261 {
9262/** @todo Check the effective address size crap! */
9263 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9264 {
9265 uint16_t u16EffAddr;
9266
9267 /* Handle the disp16 form with no registers first. */
9268 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9269 {
9270 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9271 uInfo = u16EffAddr;
9272 }
9273 else
9274 {
9275 /* Get the displacement. */
9276 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9277 {
9278 case 0: u16EffAddr = 0; break;
9279 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9280 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9281 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9282 }
9283 uInfo = u16EffAddr;
9284
9285 /* Add the base and index registers to the disp. */
9286 switch (bRm & X86_MODRM_RM_MASK)
9287 {
9288 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9289 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9290 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9291 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9292 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9293 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9294 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9295 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9296 }
9297 }
9298
9299 *pGCPtrEff = u16EffAddr;
9300 }
9301 else
9302 {
9303 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9304 uint32_t u32EffAddr;
9305
9306 /* Handle the disp32 form with no registers first. */
9307 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9308 {
9309 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9310 uInfo = u32EffAddr;
9311 }
9312 else
9313 {
9314 /* Get the register (or SIB) value. */
9315 uInfo = 0;
9316 switch ((bRm & X86_MODRM_RM_MASK))
9317 {
9318 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9319 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9320 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9321 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9322 case 4: /* SIB */
9323 {
9324 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9325 uInfo = (uint64_t)bSib << 32;
9326
9327 /* Get the index and scale it. */
9328 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9329 {
9330 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9331 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9332 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9333 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9334 case 4: u32EffAddr = 0; /*none */ break;
9335 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9336 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9337 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9338 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9339 }
9340 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9341
9342 /* add base */
9343 switch (bSib & X86_SIB_BASE_MASK)
9344 {
9345 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9346 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9347 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9348 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9349 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9350 case 5:
9351 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9352 {
9353 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9354 SET_SS_DEF();
9355 }
9356 else
9357 {
9358 uint32_t u32Disp;
9359 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9360 u32EffAddr += u32Disp;
9361 uInfo |= u32Disp;
9362 }
9363 break;
9364 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9365 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9366 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9367 }
9368 break;
9369 }
9370 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9371 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9372 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9373 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9374 }
9375
9376 /* Get and add the displacement. */
9377 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9378 {
9379 case 0:
9380 break;
9381 case 1:
9382 {
9383 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9384 u32EffAddr += i8Disp;
9385 uInfo |= (uint32_t)(int32_t)i8Disp;
9386 break;
9387 }
9388 case 2:
9389 {
9390 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9391 u32EffAddr += u32Disp;
9392 uInfo |= (uint32_t)u32Disp;
9393 break;
9394 }
9395 default:
9396 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9397 }
9398
9399 }
9400 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9401 *pGCPtrEff = u32EffAddr;
9402 }
9403 }
9404 else
9405 {
9406 uint64_t u64EffAddr;
9407
9408 /* Handle the rip+disp32 form with no registers first. */
9409 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9410 {
9411 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9412 uInfo = (uint32_t)u64EffAddr;
9413 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9414 }
9415 else
9416 {
9417 /* Get the register (or SIB) value. */
9418 uInfo = 0;
9419 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9420 {
9421 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9422 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9423 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9424 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9425 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9426 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9427 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9428 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9429 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9430 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9431 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9432 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9433 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9434 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9435 /* SIB */
9436 case 4:
9437 case 12:
9438 {
9439 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9440 uInfo = (uint64_t)bSib << 32;
9441
9442 /* Get the index and scale it. */
9443 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9444 {
9445 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9446 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9447 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9448 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9449 case 4: u64EffAddr = 0; /*none */ break;
9450 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9451 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9452 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9453 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9454 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9455 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9456 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9457 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9458 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9459 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9460 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9461 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9462 }
9463 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9464
9465 /* add base */
9466 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9467 {
9468 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9469 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9470 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9471 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9472 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9473 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9474 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9475 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9476 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9477 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9478 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9479 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9480 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9481 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9482 /* complicated encodings */
9483 case 5:
9484 case 13:
9485 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9486 {
9487 if (!pVCpu->iem.s.uRexB)
9488 {
9489 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9490 SET_SS_DEF();
9491 }
9492 else
9493 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9494 }
9495 else
9496 {
9497 uint32_t u32Disp;
9498 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9499 u64EffAddr += (int32_t)u32Disp;
9500 uInfo |= u32Disp;
9501 }
9502 break;
9503 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9504 }
9505 break;
9506 }
9507 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9508 }
9509
9510 /* Get and add the displacement. */
9511 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9512 {
9513 case 0:
9514 break;
9515 case 1:
9516 {
9517 int8_t i8Disp;
9518 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9519 u64EffAddr += i8Disp;
9520 uInfo |= (uint32_t)(int32_t)i8Disp;
9521 break;
9522 }
9523 case 2:
9524 {
9525 uint32_t u32Disp;
9526 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9527 u64EffAddr += (int32_t)u32Disp;
9528 uInfo |= u32Disp;
9529 break;
9530 }
9531 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9532 }
9533
9534 }
9535
9536 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9537 *pGCPtrEff = u64EffAddr;
9538 else
9539 {
9540 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9541 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9542 }
9543 }
9544 *puInfo = uInfo;
9545
9546 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9547 return VINF_SUCCESS;
9548}
9549
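/*
 * Usage sketch (illustrative only): unpacking the extra info returned by
 * iemOpHlpCalcRmEffAddrEx, following the layout documented above
 * (displacement in bits 31:0, SIB byte in bits 39:32).  pVCpu and bRm are
 * assumed to come from the surrounding decoder context.
 *
 * @code
 *      RTGCPTR  GCPtrEff;
 *      uint64_t uInfo;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0, &GCPtrEff, &uInfo);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const u32Disp = (uint32_t)uInfo;           // displacement, bits 31:0
 *          uint8_t  const bSib    = (uint8_t)(uInfo >> 32);    // SIB byte (if any), bits 39:32
 *          // ... hand the decoded pieces to the recompiler ...
 *      }
 * @endcode
 */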
9550/** @} */
9551
9552
9553#ifdef LOG_ENABLED
9554/**
9555 * Logs the current instruction.
9556 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9557 * @param fSameCtx Set if we have the same context information as the VMM,
9558 * clear if we may have already executed an instruction in
9559 * our debug context. When clear, we assume IEMCPU holds
9560 * valid CPU mode info.
9561 *
9562 * The @a fSameCtx parameter is now misleading and obsolete.
9563 * @param pszFunction The IEM function doing the execution.
9564 */
9565static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9566{
9567# ifdef IN_RING3
9568 if (LogIs2Enabled())
9569 {
9570 char szInstr[256];
9571 uint32_t cbInstr = 0;
9572 if (fSameCtx)
9573 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9574 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9575 szInstr, sizeof(szInstr), &cbInstr);
9576 else
9577 {
9578 uint32_t fFlags = 0;
9579 switch (IEM_GET_CPU_MODE(pVCpu))
9580 {
9581 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9582 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9583 case IEMMODE_16BIT:
9584 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9585 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9586 else
9587 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9588 break;
9589 }
9590 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9591 szInstr, sizeof(szInstr), &cbInstr);
9592 }
9593
9594 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9595 Log2(("**** %s fExec=%x\n"
9596 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9597 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9598 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9599 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9600 " %s\n"
9601 , pszFunction, pVCpu->iem.s.fExec,
9602 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9603 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9604 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9605 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9606 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9607 szInstr));
9608
9609 /* This stuff sucks atm. as it fills the log with MSRs. */
9610 //if (LogIs3Enabled())
9611 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9612 }
9613 else
9614# endif
9615 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9616 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9617 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9618}
9619#endif /* LOG_ENABLED */
9620
9621
9622#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9623/**
9624 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9625 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9626 *
9627 * @returns Modified rcStrict.
9628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9629 * @param rcStrict The instruction execution status.
9630 */
9631static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9632{
9633 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9634 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9635 {
9636 /* VMX preemption timer takes priority over NMI-window exits. */
9637 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9638 {
9639 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9640 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9641 }
9642 /*
9643 * Check remaining intercepts.
9644 *
9645 * NMI-window and Interrupt-window VM-exits.
9646 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9647 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9648 *
9649 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9650 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9651 */
9652 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9653 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9654 && !TRPMHasTrap(pVCpu))
9655 {
9656 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9657 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9658 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9659 {
9660 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9661 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9662 }
9663 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9664 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9665 {
9666 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9667 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9668 }
9669 }
9670 }
9671 /* TPR-below threshold/APIC write has the highest priority. */
9672 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9673 {
9674 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9675 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9676 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9677 }
9678 /* MTF takes priority over VMX-preemption timer. */
9679 else
9680 {
9681 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9682 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9683 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9684 }
9685 return rcStrict;
9686}
9687#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9688
9689
9690/**
9691 * The actual code execution bits of IEMExecOne, IEMExecOneWithPrefetchedByPC,
9692 * IEMExecOneBypass and friends.
9693 *
9694 * Similar code is found in IEMExecLots.
9695 *
9696 * @return Strict VBox status code.
9697 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9698 * @param fExecuteInhibit If set, execute the instruction following CLI,
9699 * POP SS and MOV SS,GR.
9700 * @param pszFunction The calling function name.
9701 */
9702DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9703{
9704 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9705 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9706 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9707 RT_NOREF_PV(pszFunction);
9708
9709#ifdef IEM_WITH_SETJMP
9710 VBOXSTRICTRC rcStrict;
9711 IEM_TRY_SETJMP(pVCpu, rcStrict)
9712 {
9713 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9714 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9715 }
9716 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9717 {
9718 pVCpu->iem.s.cLongJumps++;
9719 }
9720 IEM_CATCH_LONGJMP_END(pVCpu);
9721#else
9722 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9723 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9724#endif
9725 if (rcStrict == VINF_SUCCESS)
9726 pVCpu->iem.s.cInstructions++;
9727 if (pVCpu->iem.s.cActiveMappings > 0)
9728 {
9729 Assert(rcStrict != VINF_SUCCESS);
9730 iemMemRollback(pVCpu);
9731 }
9732 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9733 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9734 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9735
9736//#ifdef DEBUG
9737// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9738//#endif
9739
9740#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9741 /*
9742 * Perform any VMX nested-guest instruction boundary actions.
9743 *
9744 * If any of these causes a VM-exit, we must skip executing the next
9745 * there is no interrupt-inhibition, so that should ensure we don't go on
9746 * to try to execute the next instruction. Clearing fExecuteInhibit is
9747 * to try execute the next instruction. Clearing fExecuteInhibit is
9748 * problematic because of the setjmp/longjmp clobbering above.
9749 */
9750 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9751 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9752 || rcStrict != VINF_SUCCESS)
9753 { /* likely */ }
9754 else
9755 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9756#endif
9757
9758 /* Execute the next instruction as well if a cli, pop ss or
9759 mov ss, Gr has just completed successfully. */
9760 if ( fExecuteInhibit
9761 && rcStrict == VINF_SUCCESS
9762 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9763 {
9764 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9765 if (rcStrict == VINF_SUCCESS)
9766 {
9767#ifdef LOG_ENABLED
9768 iemLogCurInstr(pVCpu, false, pszFunction);
9769#endif
9770#ifdef IEM_WITH_SETJMP
9771 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9772 {
9773 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9774 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9775 }
9776 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9777 {
9778 pVCpu->iem.s.cLongJumps++;
9779 }
9780 IEM_CATCH_LONGJMP_END(pVCpu);
9781#else
9782 IEM_OPCODE_GET_FIRST_U8(&b);
9783 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9784#endif
9785 if (rcStrict == VINF_SUCCESS)
9786 {
9787 pVCpu->iem.s.cInstructions++;
9788#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9789 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9790 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9791 { /* likely */ }
9792 else
9793 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9794#endif
9795 }
9796 if (pVCpu->iem.s.cActiveMappings > 0)
9797 {
9798 Assert(rcStrict != VINF_SUCCESS);
9799 iemMemRollback(pVCpu);
9800 }
9801 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9802 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9803 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9804 }
9805 else if (pVCpu->iem.s.cActiveMappings > 0)
9806 iemMemRollback(pVCpu);
9807 /** @todo drop this after we bake this change into RIP advancing. */
9808 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9809 }
9810
9811 /*
9812 * Return value fiddling, statistics and sanity assertions.
9813 */
9814 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9815
9816 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9817 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9818 return rcStrict;
9819}
9820
9821
9822/**
9823 * Execute one instruction.
9824 *
9825 * @return Strict VBox status code.
9826 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9827 */
9828VMM_INT_DECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9829{
9830 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9831#ifdef LOG_ENABLED
9832 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9833#endif
9834
9835 /*
9836 * Do the decoding and emulation.
9837 */
9838 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9839 if (rcStrict == VINF_SUCCESS)
9840 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9841 else if (pVCpu->iem.s.cActiveMappings > 0)
9842 iemMemRollback(pVCpu);
9843
9844 if (rcStrict != VINF_SUCCESS)
9845 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9846 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9847 return rcStrict;
9848}
9849
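/*
 * Usage sketch (illustrative only, the exact EM integration is not shown):
 * a ring-3 caller simply invokes IEMExecOne and propagates the strict status
 * code.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          Log(("instruction emulation returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */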
9850
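/**
 * Executes one instruction, reusing opcode bytes supplied by the caller when
 * they were fetched at the current RIP.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   OpcodeBytesPC   The guest RIP at which @a pvOpcodeBytes was read;
 *                          the bytes are only used when this equals the
 *                          current RIP.
 * @param   pvOpcodeBytes   The prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of valid bytes at @a pvOpcodeBytes; pass 0
 *                          to force a normal opcode prefetch.
 */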
9851VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9852 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9853{
9854 VBOXSTRICTRC rcStrict;
9855 if ( cbOpcodeBytes
9856 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9857 {
9858 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9859#ifdef IEM_WITH_CODE_TLB
9860 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9861 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9862 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9863 pVCpu->iem.s.offCurInstrStart = 0;
9864 pVCpu->iem.s.offInstrNextByte = 0;
9865 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9866#else
9867 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9868 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9869#endif
9870 rcStrict = VINF_SUCCESS;
9871 }
9872 else
9873 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9874 if (rcStrict == VINF_SUCCESS)
9875 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9876 else if (pVCpu->iem.s.cActiveMappings > 0)
9877 iemMemRollback(pVCpu);
9878
9879 return rcStrict;
9880}
9881
9882
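/**
 * Executes one instruction with the memory access handlers bypassed
 * (IEM_F_BYPASS_HANDLERS) and without chasing into an interrupt shadow
 * (fExecuteInhibit is false).
 *
 * @return  Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */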
9883VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypass(PVMCPUCC pVCpu)
9884{
9885 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9886 if (rcStrict == VINF_SUCCESS)
9887 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypass");
9888 else if (pVCpu->iem.s.cActiveMappings > 0)
9889 iemMemRollback(pVCpu);
9890
9891 return rcStrict;
9892}
9893
9894
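/**
 * Combination of IEMExecOneBypass and IEMExecOneWithPrefetchedByPC: bypasses
 * the memory access handlers and reuses caller supplied opcode bytes when
 * they match the current RIP.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   OpcodeBytesPC   The guest RIP at which @a pvOpcodeBytes was read.
 * @param   pvOpcodeBytes   The prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of valid bytes at @a pvOpcodeBytes; pass 0
 *                          to force a normal opcode prefetch.
 */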
9895VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9896 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9897{
9898 VBOXSTRICTRC rcStrict;
9899 if ( cbOpcodeBytes
9900 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9901 {
9902 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9903#ifdef IEM_WITH_CODE_TLB
9904 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9905 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9906 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9907 pVCpu->iem.s.offCurInstrStart = 0;
9908 pVCpu->iem.s.offInstrNextByte = 0;
9909 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9910#else
9911 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9912 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9913#endif
9914 rcStrict = VINF_SUCCESS;
9915 }
9916 else
9917 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9918 if (rcStrict == VINF_SUCCESS)
9919 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9920 else if (pVCpu->iem.s.cActiveMappings > 0)
9921 iemMemRollback(pVCpu);
9922
9923 return rcStrict;
9924}
9925
9926
9927/**
9928 * For handling split cacheline lock operations when the host has split-lock
9929 * detection enabled.
9930 *
9931 * This will cause the interpreter to disregard the lock prefix and implicit
9932 * locking (xchg).
9933 *
9934 * @returns Strict VBox status code.
9935 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9936 */
9937VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9938{
9939 /*
9940 * Do the decoding and emulation.
9941 */
9942 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9943 if (rcStrict == VINF_SUCCESS)
9944 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9945 else if (pVCpu->iem.s.cActiveMappings > 0)
9946 iemMemRollback(pVCpu);
9947
9948 if (rcStrict != VINF_SUCCESS)
9949 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9950 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9951 return rcStrict;
9952}
9953
9954
9955/**
9956 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9957 * inject a pending TRPM trap.
9958 */
9959VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9960{
9961 Assert(TRPMHasTrap(pVCpu));
9962
9963 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9964 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9965 {
9966 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9967#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9968 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9969 if (fIntrEnabled)
9970 {
9971 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9972 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9973 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9974 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9975 else
9976 {
9977 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9978 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9979 }
9980 }
9981#else
9982 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9983#endif
9984 if (fIntrEnabled)
9985 {
9986 uint8_t u8TrapNo;
9987 TRPMEVENT enmType;
9988 uint32_t uErrCode;
9989 RTGCPTR uCr2;
9990 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9991 AssertRC(rc2);
9992 Assert(enmType == TRPM_HARDWARE_INT);
9993 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9994
9995 TRPMResetTrap(pVCpu);
9996
9997#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9998 /* Injecting an event may cause a VM-exit. */
9999 if ( rcStrict != VINF_SUCCESS
10000 && rcStrict != VINF_IEM_RAISED_XCPT)
10001 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10002#else
10003 NOREF(rcStrict);
10004#endif
10005 }
10006 }
10007
10008 return VINF_SUCCESS;
10009}
10010
10011
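/**
 * Executes instructions until a pending force flag, a status code other than
 * VINF_SUCCESS or the instruction limit forces a return to the caller,
 * injecting a pending TRPM trap first if possible.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @param   cMaxInstructions    Maximum number of instructions to execute (must be non-zero).
 * @param   cPollRate           Timer polling rate; must be a power of two minus
 *                              one as it is used as a mask on the instruction
 *                              countdown (see the assertion below).
 * @param   pcInstructions      Where to return the number of instructions
 *                              executed, optional (can be NULL).
 */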
10012VMM_INT_DECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10013{
10014 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10015 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10016 Assert(cMaxInstructions > 0);
10017
10018 /*
10019 * See if there is an interrupt pending in TRPM, inject it if we can.
10020 */
10021 /** @todo What if we are injecting an exception and not an interrupt? Is that
10022 * possible here? For now we assert it is indeed only an interrupt. */
10023 if (!TRPMHasTrap(pVCpu))
10024 { /* likely */ }
10025 else
10026 {
10027 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
10028 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10029 { /*likely */ }
10030 else
10031 return rcStrict;
10032 }
10033
10034 /*
10035 * Initial decoder init w/ prefetch, then setup setjmp.
10036 */
10037 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10038 if (rcStrict == VINF_SUCCESS)
10039 {
10040#ifdef IEM_WITH_SETJMP
10041 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10042 IEM_TRY_SETJMP(pVCpu, rcStrict)
10043#endif
10044 {
10045 /*
10046 * The run loop. We limit ourselves to the number of instructions specified by the caller (cMaxInstructions).
10047 */
10048 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10049 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10050 for (;;)
10051 {
10052 /*
10053 * Log the state.
10054 */
10055#ifdef LOG_ENABLED
10056 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10057#endif
10058
10059 /*
10060 * Do the decoding and emulation.
10061 */
10062 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10063 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10064#ifdef VBOX_STRICT
10065 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10066#endif
10067 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10068 {
10069 Assert(pVCpu->iem.s.cActiveMappings == 0);
10070 pVCpu->iem.s.cInstructions++;
10071
10072#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10073 /* Perform any VMX nested-guest instruction boundary actions. */
10074 uint64_t fCpu = pVCpu->fLocalForcedActions;
10075 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10076 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10077 { /* likely */ }
10078 else
10079 {
10080 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10081 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10082 fCpu = pVCpu->fLocalForcedActions;
10083 else
10084 {
10085 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10086 break;
10087 }
10088 }
10089#endif
10090 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10091 {
10092#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10093 uint64_t fCpu = pVCpu->fLocalForcedActions;
10094#endif
10095 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10096 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10097 | VMCPU_FF_TLB_FLUSH
10098 | VMCPU_FF_UNHALT );
10099
10100 if (RT_LIKELY( ( !fCpu
10101 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10102 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10103 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10104 {
10105 if (--cMaxInstructionsGccStupidity > 0)
10106 {
10107 /* Poll timers every now and then according to the caller's specs. */
10108 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10109 || !TMTimerPollBool(pVM, pVCpu))
10110 {
10111 Assert(pVCpu->iem.s.cActiveMappings == 0);
10112 iemReInitDecoder(pVCpu);
10113 continue;
10114 }
10115 }
10116 }
10117 }
10118 Assert(pVCpu->iem.s.cActiveMappings == 0);
10119 }
10120 else if (pVCpu->iem.s.cActiveMappings > 0)
10121 iemMemRollback(pVCpu);
10122 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10123 break;
10124 }
10125 }
10126#ifdef IEM_WITH_SETJMP
10127 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10128 {
10129 if (pVCpu->iem.s.cActiveMappings > 0)
10130 iemMemRollback(pVCpu);
10131# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10132 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10133# endif
10134 pVCpu->iem.s.cLongJumps++;
10135 }
10136 IEM_CATCH_LONGJMP_END(pVCpu);
10137#endif
10138
10139 /*
10140 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10141 */
10142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10143 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10144 }
10145 else
10146 {
10147 if (pVCpu->iem.s.cActiveMappings > 0)
10148 iemMemRollback(pVCpu);
10149
10150#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10151 /*
10152 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10153 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10154 */
10155 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10156#endif
10157 }
10158
10159 /*
10160 * Maybe re-enter raw-mode and log.
10161 */
10162 if (rcStrict != VINF_SUCCESS)
10163 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10164 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10165 if (pcInstructions)
10166 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10167 return rcStrict;
10168}
10169
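/*
 * Usage sketch (illustrative only, the numbers are made up): the poll rate is
 * used as a mask on the remaining instruction count, so the caller passes a
 * power-of-two-minus-one value.
 *
 * @code
 *      uint32_t cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
 * @endcode
 */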
10170
10171/**
10172 * Interface used by EMExecuteExec, does exit statistics and limits.
10173 *
10174 * @returns Strict VBox status code.
10175 * @param pVCpu The cross context virtual CPU structure.
10176 * @param fWillExit To be defined.
10177 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10178 * @param cMaxInstructions Maximum number of instructions to execute.
10179 * @param cMaxInstructionsWithoutExits
10180 * The max number of instructions without exits.
10181 * @param pStats Where to return statistics.
10182 */
10183VMM_INT_DECL(VBOXSTRICTRC)
10184IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10185 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10186{
10187 NOREF(fWillExit); /** @todo define flexible exit crits */
10188
10189 /*
10190 * Initialize return stats.
10191 */
10192 pStats->cInstructions = 0;
10193 pStats->cExits = 0;
10194 pStats->cMaxExitDistance = 0;
10195 pStats->cReserved = 0;
10196
10197 /*
10198 * Initial decoder init w/ prefetch, then setup setjmp.
10199 */
10200 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10201 if (rcStrict == VINF_SUCCESS)
10202 {
10203#ifdef IEM_WITH_SETJMP
10204 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10205 IEM_TRY_SETJMP(pVCpu, rcStrict)
10206#endif
10207 {
10208#ifdef IN_RING0
10209 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10210#endif
10211 uint32_t cInstructionSinceLastExit = 0;
10212
10213 /*
10214 * The run loop. We limit ourselves to the instruction counts specified by the caller.
10215 */
10216 PVM pVM = pVCpu->CTX_SUFF(pVM);
10217 for (;;)
10218 {
10219 /*
10220 * Log the state.
10221 */
10222#ifdef LOG_ENABLED
10223 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10224#endif
10225
10226 /*
10227 * Do the decoding and emulation.
10228 */
10229 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10230
10231 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10232 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10233
10234 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10235 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10236 {
10237 pStats->cExits += 1;
10238 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10239 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10240 cInstructionSinceLastExit = 0;
10241 }
10242
10243 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10244 {
10245 Assert(pVCpu->iem.s.cActiveMappings == 0);
10246 pVCpu->iem.s.cInstructions++;
10247 pStats->cInstructions++;
10248 cInstructionSinceLastExit++;
10249
10250#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10251 /* Perform any VMX nested-guest instruction boundary actions. */
10252 uint64_t fCpu = pVCpu->fLocalForcedActions;
10253 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10254 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10255 { /* likely */ }
10256 else
10257 {
10258 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10259 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10260 fCpu = pVCpu->fLocalForcedActions;
10261 else
10262 {
10263 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10264 break;
10265 }
10266 }
10267#endif
10268 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10269 {
10270#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10271 uint64_t fCpu = pVCpu->fLocalForcedActions;
10272#endif
10273 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10274 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10275 | VMCPU_FF_TLB_FLUSH
10276 | VMCPU_FF_UNHALT );
10277 if (RT_LIKELY( ( ( !fCpu
10278 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10279 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10280 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10281 || pStats->cInstructions < cMinInstructions))
10282 {
10283 if (pStats->cInstructions < cMaxInstructions)
10284 {
10285 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10286 {
10287#ifdef IN_RING0
10288 if ( !fCheckPreemptionPending
10289 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10290#endif
10291 {
10292 Assert(pVCpu->iem.s.cActiveMappings == 0);
10293 iemReInitDecoder(pVCpu);
10294 continue;
10295 }
10296#ifdef IN_RING0
10297 rcStrict = VINF_EM_RAW_INTERRUPT;
10298 break;
10299#endif
10300 }
10301 }
10302 }
10303 Assert(!(fCpu & VMCPU_FF_IEM));
10304 }
10305 Assert(pVCpu->iem.s.cActiveMappings == 0);
10306 }
10307 else if (pVCpu->iem.s.cActiveMappings > 0)
10308 iemMemRollback(pVCpu);
10309 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10310 break;
10311 }
10312 }
10313#ifdef IEM_WITH_SETJMP
10314 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10315 {
10316 if (pVCpu->iem.s.cActiveMappings > 0)
10317 iemMemRollback(pVCpu);
10318 pVCpu->iem.s.cLongJumps++;
10319 }
10320 IEM_CATCH_LONGJMP_END(pVCpu);
10321#endif
10322
10323 /*
10324 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10325 */
10326 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10327 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10328 }
10329 else
10330 {
10331 if (pVCpu->iem.s.cActiveMappings > 0)
10332 iemMemRollback(pVCpu);
10333
10334#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10335 /*
10336 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10337 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10338 */
10339 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10340#endif
10341 }
10342
10343 /*
10344     * Log the result when it isn't plain success.
10345 */
10346 if (rcStrict != VINF_SUCCESS)
10347 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10348 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10349 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10350 return rcStrict;
10351}
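
/*
 * Usage sketch (illustration only, not an actual caller): roughly how EM could
 * drive IEMExecForExits.  The instruction budgets below are made-up example
 * values, and fWillExit is passed as zero since the function currently
 * ignores it (see the NOREF above).
 */
static VBOXSTRICTRC emExampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats; /* filled in by IEMExecForExits */
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 32 /*cMinInstructions*/,
                                            2048 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    Log(("emExampleExecForExits: ins=%u exits=%u maxdist=%u -> %Rrc\n",
         Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}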
10352
10353
10354/**
10355 * Injects a trap, fault, abort, software interrupt or external interrupt.
10356 *
10357 * The parameter list matches TRPMQueryTrapAll pretty closely.
10358 *
10359 * @returns Strict VBox status code.
10360 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10361 * @param u8TrapNo The trap number.
10362 * @param enmType What type is it (trap/fault/abort), software
10363 * interrupt or hardware interrupt.
10364 * @param uErrCode The error code if applicable.
10365 * @param uCr2 The CR2 value if applicable.
10366 * @param cbInstr The instruction length (only relevant for
10367 * software interrupts).
10368 */
10369VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10370 uint8_t cbInstr)
10371{
10372 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10373#ifdef DBGFTRACE_ENABLED
10374 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10375 u8TrapNo, enmType, uErrCode, uCr2);
10376#endif
10377
10378 uint32_t fFlags;
10379 switch (enmType)
10380 {
10381 case TRPM_HARDWARE_INT:
10382 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10383 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10384 uErrCode = uCr2 = 0;
10385 break;
10386
10387 case TRPM_SOFTWARE_INT:
10388 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10389 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10390 uErrCode = uCr2 = 0;
10391 break;
10392
10393 case TRPM_TRAP:
10394 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
10395 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10396 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10397 if (u8TrapNo == X86_XCPT_PF)
10398 fFlags |= IEM_XCPT_FLAGS_CR2;
10399 switch (u8TrapNo)
10400 {
10401 case X86_XCPT_DF:
10402 case X86_XCPT_TS:
10403 case X86_XCPT_NP:
10404 case X86_XCPT_SS:
10405 case X86_XCPT_PF:
10406 case X86_XCPT_AC:
10407 case X86_XCPT_GP:
10408 fFlags |= IEM_XCPT_FLAGS_ERR;
10409 break;
10410 }
10411 break;
10412
10413 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10414 }
10415
10416 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10417
10418 if (pVCpu->iem.s.cActiveMappings > 0)
10419 iemMemRollback(pVCpu);
10420
10421 return rcStrict;
10422}
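
/*
 * Usage sketch (illustration only): injecting a guest page fault via
 * IEMInjectTrap.  The error code value is just an example (bit 1 = write
 * access); cbInstr is irrelevant for hardware exceptions and passed as zero.
 */
static VBOXSTRICTRC iemExampleInjectPageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, 0x02 /*uErrCode: example, write access*/,
                         GCPtrFault, 0 /*cbInstr*/);
}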
10423
10424
10425/**
10426 * Injects the active TRPM event.
10427 *
10428 * @returns Strict VBox status code.
10429 * @param pVCpu The cross context virtual CPU structure.
10430 */
10431VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10432{
10433#ifndef IEM_IMPLEMENTS_TASKSWITCH
10434 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10435#else
10436 uint8_t u8TrapNo;
10437 TRPMEVENT enmType;
10438 uint32_t uErrCode;
10439 RTGCUINTPTR uCr2;
10440 uint8_t cbInstr;
10441 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10442 if (RT_FAILURE(rc))
10443 return rc;
10444
10445 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10446 * ICEBP \#DB injection as a special case. */
10447 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10448#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10449 if (rcStrict == VINF_SVM_VMEXIT)
10450 rcStrict = VINF_SUCCESS;
10451#endif
10452#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10453 if (rcStrict == VINF_VMX_VMEXIT)
10454 rcStrict = VINF_SUCCESS;
10455#endif
10456 /** @todo Are there any other codes that imply the event was successfully
10457 * delivered to the guest? See @bugref{6607}. */
10458 if ( rcStrict == VINF_SUCCESS
10459 || rcStrict == VINF_IEM_RAISED_XCPT)
10460 TRPMResetTrap(pVCpu);
10461
10462 return rcStrict;
10463#endif
10464}
10465
10466
10467VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10468{
10469 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10470 return VERR_NOT_IMPLEMENTED;
10471}
10472
10473
10474VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10475{
10476 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10477 return VERR_NOT_IMPLEMENTED;
10478}
10479
10480
10481/**
10482 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10483 *
10484 * This API ASSUMES that the caller has already verified that the guest code is
10485 * allowed to access the I/O port. (The I/O port is in the DX register in the
10486 * guest state.)
10487 *
10488 * @returns Strict VBox status code.
10489 * @param pVCpu The cross context virtual CPU structure.
10490 * @param cbValue The size of the I/O port access (1, 2, or 4).
10491 * @param enmAddrMode The addressing mode.
10492 * @param fRepPrefix Indicates whether a repeat prefix is used
10493 * (doesn't matter which for this instruction).
10494 * @param cbInstr The instruction length in bytes.
10495 * @param   iEffSeg             The effective segment register.
10496 * @param fIoChecked Whether the access to the I/O port has been
10497 * checked or not. It's typically checked in the
10498 * HM scenario.
10499 */
10500VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10501 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10502{
10503 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10504 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10505
10506 /*
10507 * State init.
10508 */
10509 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10510
10511 /*
10512 * Switch orgy for getting to the right handler.
10513 */
10514 VBOXSTRICTRC rcStrict;
10515 if (fRepPrefix)
10516 {
10517 switch (enmAddrMode)
10518 {
10519 case IEMMODE_16BIT:
10520 switch (cbValue)
10521 {
10522 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10523 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10524 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10525 default:
10526 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10527 }
10528 break;
10529
10530 case IEMMODE_32BIT:
10531 switch (cbValue)
10532 {
10533 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10534 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10535 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10536 default:
10537 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10538 }
10539 break;
10540
10541 case IEMMODE_64BIT:
10542 switch (cbValue)
10543 {
10544 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10545 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10546 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10547 default:
10548 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10549 }
10550 break;
10551
10552 default:
10553 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10554 }
10555 }
10556 else
10557 {
10558 switch (enmAddrMode)
10559 {
10560 case IEMMODE_16BIT:
10561 switch (cbValue)
10562 {
10563 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10564 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10565 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10566 default:
10567 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10568 }
10569 break;
10570
10571 case IEMMODE_32BIT:
10572 switch (cbValue)
10573 {
10574 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10575 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10576 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10577 default:
10578 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10579 }
10580 break;
10581
10582 case IEMMODE_64BIT:
10583 switch (cbValue)
10584 {
10585 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10586 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10587 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10588 default:
10589 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10590 }
10591 break;
10592
10593 default:
10594 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10595 }
10596 }
10597
10598 if (pVCpu->iem.s.cActiveMappings)
10599 iemMemRollback(pVCpu);
10600
10601 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10602}
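
/*
 * Usage sketch (illustration only): forwarding a REP OUTSB with 16-bit
 * addressing to IEM.  The instruction length comes from the caller;
 * fIoChecked is true here on the assumption that the exit handler has
 * already consulted the I/O permission bitmap.
 */
static VBOXSTRICTRC hmExampleEmulateRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}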
10603
10604
10605/**
10606 * Interface for HM and EM for executing string I/O IN (read) instructions.
10607 *
10608 * This API ASSUMES that the caller has already verified that the guest code is
10609 * allowed to access the I/O port. (The I/O port is in the DX register in the
10610 * guest state.)
10611 *
10612 * @returns Strict VBox status code.
10613 * @param pVCpu The cross context virtual CPU structure.
10614 * @param cbValue The size of the I/O port access (1, 2, or 4).
10615 * @param enmAddrMode The addressing mode.
10616 * @param fRepPrefix Indicates whether a repeat prefix is used
10617 * (doesn't matter which for this instruction).
10618 * @param cbInstr The instruction length in bytes.
10619 * @param fIoChecked Whether the access to the I/O port has been
10620 * checked or not. It's typically checked in the
10621 * HM scenario.
10622 */
10623VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10624 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10625{
10626 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10627
10628 /*
10629 * State init.
10630 */
10631 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10632
10633 /*
10634 * Switch orgy for getting to the right handler.
10635 */
10636 VBOXSTRICTRC rcStrict;
10637 if (fRepPrefix)
10638 {
10639 switch (enmAddrMode)
10640 {
10641 case IEMMODE_16BIT:
10642 switch (cbValue)
10643 {
10644 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10645 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10646 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10647 default:
10648 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10649 }
10650 break;
10651
10652 case IEMMODE_32BIT:
10653 switch (cbValue)
10654 {
10655 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10656 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10657 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10658 default:
10659 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10660 }
10661 break;
10662
10663 case IEMMODE_64BIT:
10664 switch (cbValue)
10665 {
10666 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10667 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10668 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10669 default:
10670 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10671 }
10672 break;
10673
10674 default:
10675 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10676 }
10677 }
10678 else
10679 {
10680 switch (enmAddrMode)
10681 {
10682 case IEMMODE_16BIT:
10683 switch (cbValue)
10684 {
10685 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10686 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10687 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10688 default:
10689 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10690 }
10691 break;
10692
10693 case IEMMODE_32BIT:
10694 switch (cbValue)
10695 {
10696 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10697 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10698 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10699 default:
10700 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10701 }
10702 break;
10703
10704 case IEMMODE_64BIT:
10705 switch (cbValue)
10706 {
10707 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10708 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10709 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10710 default:
10711 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10712 }
10713 break;
10714
10715 default:
10716 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10717 }
10718 }
10719
10720 if ( pVCpu->iem.s.cActiveMappings == 0
10721 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10722 { /* likely */ }
10723 else
10724 {
10725 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10726 iemMemRollback(pVCpu);
10727 }
10728 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10729}
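
/*
 * Usage sketch (illustration only): the IN counterpart, here a REP INSW with
 * 32-bit addressing.  As above, fIoChecked reflects an assumed prior I/O
 * permission check by the caller.
 */
static VBOXSTRICTRC hmExampleEmulateRepInsw(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoRead(pVCpu, 2 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                               cbInstr, true /*fIoChecked*/);
}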
10730
10731
10732/**
10733 * Interface for rawmode to execute an OUT (write) instruction.
10734 *
10735 * @returns Strict VBox status code.
10736 * @param pVCpu The cross context virtual CPU structure.
10737 * @param cbInstr The instruction length in bytes.
10738 * @param   u16Port             The port to write to.
10739 * @param fImm Whether the port is specified using an immediate operand or
10740 * using the implicit DX register.
10741 * @param cbReg The register size.
10742 *
10743 * @remarks In ring-0 not all of the state needs to be synced in.
10744 */
10745VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10746{
10747 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10748 Assert(cbReg <= 4 && cbReg != 3);
10749
10750 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10751 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10752 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10753 Assert(!pVCpu->iem.s.cActiveMappings);
10754 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10755}
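
/*
 * Usage sketch (illustration only): emulating a 1-byte 'out dx, al'
 * (opcode EE, port taken from DX rather than an immediate operand).
 */
static VBOXSTRICTRC hmExampleEmulateOutDxAl(PVMCPUCC pVCpu, uint16_t u16Port)
{
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
}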
10756
10757
10758/**
10759 * Interface for rawmode to execute an IN (read) instruction.
10760 *
10761 * @returns Strict VBox status code.
10762 * @param pVCpu The cross context virtual CPU structure.
10763 * @param cbInstr The instruction length in bytes.
10764 * @param u16Port The port to read.
10765 * @param fImm Whether the port is specified using an immediate operand or
10766 * using the implicit DX.
10767 * @param cbReg The register size.
10768 */
10769VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10770{
10771 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10772 Assert(cbReg <= 4 && cbReg != 3);
10773
10774 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10775 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10776 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10777 Assert(!pVCpu->iem.s.cActiveMappings);
10778 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10779}
10780
10781
10782/**
10783 * Interface for HM and EM to write to a CRx register.
10784 *
10785 * @returns Strict VBox status code.
10786 * @param pVCpu The cross context virtual CPU structure.
10787 * @param cbInstr The instruction length in bytes.
10788 * @param iCrReg The control register number (destination).
10789 * @param iGReg The general purpose register number (source).
10790 *
10791 * @remarks In ring-0 not all of the state needs to be synced in.
10792 */
10793VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10794{
10795 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10796 Assert(iCrReg < 16);
10797 Assert(iGReg < 16);
10798
10799 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10800 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10801 Assert(!pVCpu->iem.s.cActiveMappings);
10802 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10803}
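
/*
 * Usage sketch (illustration only): emulating 'mov cr3, rax' (0F 22 D8,
 * 3 bytes) on behalf of an intercept; iCrReg/iGReg use the same indexing
 * convention as documented above.
 */
static VBOXSTRICTRC hmExampleEmulateMovToCr3(PVMCPUCC pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg: CR3*/, 0 /*iGReg: RAX*/);
}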
10804
10805
10806/**
10807 * Interface for HM and EM to read from a CRx register.
10808 *
10809 * @returns Strict VBox status code.
10810 * @param pVCpu The cross context virtual CPU structure.
10811 * @param cbInstr The instruction length in bytes.
10812 * @param iGReg The general purpose register number (destination).
10813 * @param iCrReg The control register number (source).
10814 *
10815 * @remarks In ring-0 not all of the state needs to be synced in.
10816 */
10817VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10818{
10819 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10820 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10821 | CPUMCTX_EXTRN_APIC_TPR);
10822 Assert(iCrReg < 16);
10823 Assert(iGReg < 16);
10824
10825 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10826 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10827 Assert(!pVCpu->iem.s.cActiveMappings);
10828 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10829}
10830
10831
10832/**
10833 * Interface for HM and EM to write to a DRx register.
10834 *
10835 * @returns Strict VBox status code.
10836 * @param pVCpu The cross context virtual CPU structure.
10837 * @param cbInstr The instruction length in bytes.
10838 * @param iDrReg The debug register number (destination).
10839 * @param iGReg The general purpose register number (source).
10840 *
10841 * @remarks In ring-0 not all of the state needs to be synced in.
10842 */
10843VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10844{
10845 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10846 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10847 Assert(iDrReg < 8);
10848 Assert(iGReg < 16);
10849
10850 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10851 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10852 Assert(!pVCpu->iem.s.cActiveMappings);
10853 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10854}
10855
10856
10857/**
10858 * Interface for HM and EM to read from a DRx register.
10859 *
10860 * @returns Strict VBox status code.
10861 * @param pVCpu The cross context virtual CPU structure.
10862 * @param cbInstr The instruction length in bytes.
10863 * @param iGReg The general purpose register number (destination).
10864 * @param iDrReg The debug register number (source).
10865 *
10866 * @remarks In ring-0 not all of the state needs to be synced in.
10867 */
10868VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10869{
10870 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10871 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10872 Assert(iDrReg < 8);
10873 Assert(iGReg < 16);
10874
10875 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10876 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10877 Assert(!pVCpu->iem.s.cActiveMappings);
10878 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10879}
10880
10881
10882/**
10883 * Interface for HM and EM to clear the CR0[TS] bit.
10884 *
10885 * @returns Strict VBox status code.
10886 * @param pVCpu The cross context virtual CPU structure.
10887 * @param cbInstr The instruction length in bytes.
10888 *
10889 * @remarks In ring-0 not all of the state needs to be synced in.
10890 */
10891VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10892{
10893 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10894
10895 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10896 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10897 Assert(!pVCpu->iem.s.cActiveMappings);
10898 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10899}
10900
10901
10902/**
10903 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10904 *
10905 * @returns Strict VBox status code.
10906 * @param pVCpu The cross context virtual CPU structure.
10907 * @param cbInstr The instruction length in bytes.
10908 * @param uValue The value to load into CR0.
10909 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10910 * memory operand. Otherwise pass NIL_RTGCPTR.
10911 *
10912 * @remarks In ring-0 not all of the state needs to be synced in.
10913 */
10914VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10915{
10916 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10917
10918 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10919 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10920 Assert(!pVCpu->iem.s.cActiveMappings);
10921 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10922}
10923
10924
10925/**
10926 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10927 *
10928 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10929 *
10930 * @returns Strict VBox status code.
10931 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10932 * @param cbInstr The instruction length in bytes.
10933 * @remarks In ring-0 not all of the state needs to be synced in.
10934 * @thread EMT(pVCpu)
10935 */
10936VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10937{
10938 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10939
10940 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10941 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10942 Assert(!pVCpu->iem.s.cActiveMappings);
10943 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10944}
10945
10946
10947/**
10948 * Interface for HM and EM to emulate the WBINVD instruction.
10949 *
10950 * @returns Strict VBox status code.
10951 * @param pVCpu The cross context virtual CPU structure.
10952 * @param cbInstr The instruction length in bytes.
10953 *
10954 * @remarks In ring-0 not all of the state needs to be synced in.
10955 */
10956VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10957{
10958 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10959
10960 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10961 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10962 Assert(!pVCpu->iem.s.cActiveMappings);
10963 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10964}
10965
10966
10967/**
10968 * Interface for HM and EM to emulate the INVD instruction.
10969 *
10970 * @returns Strict VBox status code.
10971 * @param pVCpu The cross context virtual CPU structure.
10972 * @param cbInstr The instruction length in bytes.
10973 *
10974 * @remarks In ring-0 not all of the state needs to be synced in.
10975 */
10976VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10977{
10978 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10979
10980 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10981 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10982 Assert(!pVCpu->iem.s.cActiveMappings);
10983 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10984}
10985
10986
10987/**
10988 * Interface for HM and EM to emulate the INVLPG instruction.
10989 *
10990 * @returns Strict VBox status code.
10991 * @retval VINF_PGM_SYNC_CR3
10992 *
10993 * @param pVCpu The cross context virtual CPU structure.
10994 * @param cbInstr The instruction length in bytes.
10995 * @param GCPtrPage The effective address of the page to invalidate.
10996 *
10997 * @remarks In ring-0 not all of the state needs to be synced in.
10998 */
10999VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11000{
11001 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11002
11003 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11004 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11005 Assert(!pVCpu->iem.s.cActiveMappings);
11006 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11007}
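
/*
 * Usage sketch (illustration only): a caller would treat VINF_PGM_SYNC_CR3
 * (see the @retval above) as a success status; converting VINF_IEM_RAISED_XCPT
 * to VINF_SUCCESS follows the pattern commonly used with these
 * decoded-instruction interfaces once the exception has been queued for the
 * guest.
 */
static VBOXSTRICTRC hmExampleEmulateInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
    if (rcStrict == VINF_IEM_RAISED_XCPT)
        rcStrict = VINF_SUCCESS; /* exception already raised towards the guest */
    return rcStrict;
}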
11008
11009
11010/**
11011 * Interface for HM and EM to emulate the INVPCID instruction.
11012 *
11013 * @returns Strict VBox status code.
11014 * @retval VINF_PGM_SYNC_CR3
11015 *
11016 * @param pVCpu The cross context virtual CPU structure.
11017 * @param cbInstr The instruction length in bytes.
11018 * @param iEffSeg The effective segment register.
11019 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11020 * @param uType The invalidation type.
11021 *
11022 * @remarks In ring-0 not all of the state needs to be synced in.
11023 */
11024VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11025 uint64_t uType)
11026{
11027 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11028
11029 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11030 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11031 Assert(!pVCpu->iem.s.cActiveMappings);
11032 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11033}
11034
11035
11036/**
11037 * Interface for HM and EM to emulate the CPUID instruction.
11038 *
11039 * @returns Strict VBox status code.
11040 *
11041 * @param pVCpu The cross context virtual CPU structure.
11042 * @param cbInstr The instruction length in bytes.
11043 *
11044 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
11045 */
11046VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11047{
11048 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11049 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11050
11051 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11052 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11053 Assert(!pVCpu->iem.s.cActiveMappings);
11054 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11055}
11056
11057
11058/**
11059 * Interface for HM and EM to emulate the RDPMC instruction.
11060 *
11061 * @returns Strict VBox status code.
11062 *
11063 * @param pVCpu The cross context virtual CPU structure.
11064 * @param cbInstr The instruction length in bytes.
11065 *
11066 * @remarks Not all of the state needs to be synced in.
11067 */
11068VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11069{
11070 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11071 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11072
11073 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11074 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11075 Assert(!pVCpu->iem.s.cActiveMappings);
11076 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11077}
11078
11079
11080/**
11081 * Interface for HM and EM to emulate the RDTSC instruction.
11082 *
11083 * @returns Strict VBox status code.
11084 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11085 *
11086 * @param pVCpu The cross context virtual CPU structure.
11087 * @param cbInstr The instruction length in bytes.
11088 *
11089 * @remarks Not all of the state needs to be synced in.
11090 */
11091VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11092{
11093 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11094 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11095
11096 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11097 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11098 Assert(!pVCpu->iem.s.cActiveMappings);
11099 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11100}
11101
11102
11103/**
11104 * Interface for HM and EM to emulate the RDTSCP instruction.
11105 *
11106 * @returns Strict VBox status code.
11107 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11108 *
11109 * @param pVCpu The cross context virtual CPU structure.
11110 * @param cbInstr The instruction length in bytes.
11111 *
11112 * @remarks Not all of the state needs to be synced in. Recommended
11113 *          to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
11114 */
11115VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11116{
11117 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11118 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11119
11120 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11121 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11122 Assert(!pVCpu->iem.s.cActiveMappings);
11123 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11124}
11125
11126
11127/**
11128 * Interface for HM and EM to emulate the RDMSR instruction.
11129 *
11130 * @returns Strict VBox status code.
11131 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11132 *
11133 * @param pVCpu The cross context virtual CPU structure.
11134 * @param cbInstr The instruction length in bytes.
11135 *
11136 * @remarks Not all of the state needs to be synced in. Requires RCX and
11137 * (currently) all MSRs.
11138 */
11139VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11140{
11141 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11142 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11143
11144 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11145 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11146 Assert(!pVCpu->iem.s.cActiveMappings);
11147 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11148}
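
/*
 * Usage sketch (illustration only): forwarding an RDMSR intercept to IEM.
 * RCX and the MSR state are assumed to have been imported beforehand, as the
 * context assertion above requires.
 */
static VBOXSTRICTRC hmExampleEmulateRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, cbInstr);
    if (rcStrict == VINF_IEM_RAISED_XCPT)
        rcStrict = VINF_SUCCESS; /* e.g. #GP(0) for an unknown MSR has been raised towards the guest */
    return rcStrict;
}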
11149
11150
11151/**
11152 * Interface for HM and EM to emulate the WRMSR instruction.
11153 *
11154 * @returns Strict VBox status code.
11155 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11156 *
11157 * @param pVCpu The cross context virtual CPU structure.
11158 * @param cbInstr The instruction length in bytes.
11159 *
11160 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11161 * and (currently) all MSRs.
11162 */
11163VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11164{
11165 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11166 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11167 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11168
11169 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11170 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11171 Assert(!pVCpu->iem.s.cActiveMappings);
11172 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11173}
11174
11175
11176/**
11177 * Interface for HM and EM to emulate the MONITOR instruction.
11178 *
11179 * @returns Strict VBox status code.
11180 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11181 *
11182 * @param pVCpu The cross context virtual CPU structure.
11183 * @param cbInstr The instruction length in bytes.
11184 *
11185 * @remarks Not all of the state needs to be synced in.
11186 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11187 * are used.
11188 */
11189VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11190{
11191 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11192 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11193
11194 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11195 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11196 Assert(!pVCpu->iem.s.cActiveMappings);
11197 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11198}
11199
11200
11201/**
11202 * Interface for HM and EM to emulate the MWAIT instruction.
11203 *
11204 * @returns Strict VBox status code.
11205 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11206 *
11207 * @param pVCpu The cross context virtual CPU structure.
11208 * @param cbInstr The instruction length in bytes.
11209 *
11210 * @remarks Not all of the state needs to be synced in.
11211 */
11212VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11213{
11214 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11215 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11216
11217 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11218 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11219 Assert(!pVCpu->iem.s.cActiveMappings);
11220 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11221}
11222
11223
11224/**
11225 * Interface for HM and EM to emulate the HLT instruction.
11226 *
11227 * @returns Strict VBox status code.
11228 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11229 *
11230 * @param pVCpu The cross context virtual CPU structure.
11231 * @param cbInstr The instruction length in bytes.
11232 *
11233 * @remarks Not all of the state needs to be synced in.
11234 */
11235VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11236{
11237 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11238
11239 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11240 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11241 Assert(!pVCpu->iem.s.cActiveMappings);
11242 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11243}
11244
11245
11246/**
11247 * Checks if IEM is in the process of delivering an event (interrupt or
11248 * exception).
11249 *
11250 * @returns true if we're in the process of raising an interrupt or exception,
11251 * false otherwise.
11252 * @param pVCpu The cross context virtual CPU structure.
11253 * @param puVector Where to store the vector associated with the
11254 * currently delivered event, optional.
11255 * @param   pfFlags             Where to store the event delivery flags (see
11256 * IEM_XCPT_FLAGS_XXX), optional.
11257 * @param puErr Where to store the error code associated with the
11258 * event, optional.
11259 * @param puCr2 Where to store the CR2 associated with the event,
11260 * optional.
11261 * @remarks The caller should check the flags to determine if the error code and
11262 * CR2 are valid for the event.
11263 */
11264VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11265{
11266 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11267 if (fRaisingXcpt)
11268 {
11269 if (puVector)
11270 *puVector = pVCpu->iem.s.uCurXcpt;
11271 if (pfFlags)
11272 *pfFlags = pVCpu->iem.s.fCurXcpt;
11273 if (puErr)
11274 *puErr = pVCpu->iem.s.uCurXcptErr;
11275 if (puCr2)
11276 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11277 }
11278 return fRaisingXcpt;
11279}
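
/*
 * Usage sketch (illustration only): querying whether IEM is in the middle of
 * delivering an event, e.g. while deciding how to classify a nested fault.
 * All out parameters are optional, as documented above.
 */
static void iemExampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x: fFlags=%#x uErr=%#x uCr2=%#RX64\n", uVector, fFlags, uErr, uCr2));
}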
11280
11281#ifdef IN_RING3
11282
11283/**
11284 * Handles the unlikely and probably fatal merge cases.
11285 *
11286 * @returns Merged status code.
11287 * @param rcStrict Current EM status code.
11288 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11289 * with @a rcStrict.
11290 * @param iMemMap The memory mapping index. For error reporting only.
11291 * @param pVCpu The cross context virtual CPU structure of the calling
11292 * thread, for error reporting only.
11293 */
11294DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11295 unsigned iMemMap, PVMCPUCC pVCpu)
11296{
11297 if (RT_FAILURE_NP(rcStrict))
11298 return rcStrict;
11299
11300 if (RT_FAILURE_NP(rcStrictCommit))
11301 return rcStrictCommit;
11302
11303 if (rcStrict == rcStrictCommit)
11304 return rcStrictCommit;
11305
11306 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11307 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11308 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11309 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11310 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11311 return VERR_IOM_FF_STATUS_IPE;
11312}
11313
11314
11315/**
11316 * Helper for IOMR3ProcessForceFlag.
11317 *
11318 * @returns Merged status code.
11319 * @param rcStrict Current EM status code.
11320 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11321 * with @a rcStrict.
11322 * @param iMemMap The memory mapping index. For error reporting only.
11323 * @param pVCpu The cross context virtual CPU structure of the calling
11324 * thread, for error reporting only.
11325 */
11326DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11327{
11328 /* Simple. */
11329 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11330 return rcStrictCommit;
11331
11332 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11333 return rcStrict;
11334
11335 /* EM scheduling status codes. */
11336 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11337 && rcStrict <= VINF_EM_LAST))
11338 {
11339 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11340 && rcStrictCommit <= VINF_EM_LAST))
11341 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11342 }
11343
11344 /* Unlikely */
11345 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11346}
11347
11348
11349/**
11350 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11351 *
11352 * @returns Merge between @a rcStrict and what the commit operation returned.
11353 * @param pVM The cross context VM structure.
11354 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11355 * @param rcStrict The status code returned by ring-0 or raw-mode.
11356 */
11357VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11358{
11359 /*
11360 * Reset the pending commit.
11361 */
11362 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11363 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11364 ("%#x %#x %#x\n",
11365 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11366 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11367
11368 /*
11369 * Commit the pending bounce buffers (usually just one).
11370 */
11371 unsigned cBufs = 0;
11372 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11373 while (iMemMap-- > 0)
11374 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11375 {
11376 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11377 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11378 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11379
11380 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11381 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11382 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11383
11384 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11385 {
11386 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11387 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11388 pbBuf,
11389 cbFirst,
11390 PGMACCESSORIGIN_IEM);
11391 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11392 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11393 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11394 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11395 }
11396
11397 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11398 {
11399 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11401 pbBuf + cbFirst,
11402 cbSecond,
11403 PGMACCESSORIGIN_IEM);
11404 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11405 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11406 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11407 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11408 }
11409 cBufs++;
11410 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11411 }
11412
11413 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11414 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11415 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11416 pVCpu->iem.s.cActiveMappings = 0;
11417 return rcStrict;
11418}
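
/*
 * Usage sketch (illustration only): the ring-3 force-flag processing loop
 * would typically call IEMR3ProcessForceFlag along these lines when
 * VMCPU_FF_IEM is pending after returning from ring-0 execution.
 */
static VBOXSTRICTRC emExampleProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}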
11419
11420#endif /* IN_RING3 */
11421