source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 105655

Last change on this file since 105655 was 105616, checked in by vboxsync on 2024-08-07:
VMM/IEM: Another iemTlbInvalidateLargePageWorkerInner optimization attempt and some stats. bugref:10727

1/* $Id: IEMAll.cpp 105616 2024-08-07 20:22:21Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
 31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
 57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
 58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
 92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
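/*
 * Example of turning the above on (an assumption about the usual VBox logger
 * setup, not something stated in this file): with a debug build one would
 * typically point the VBOX_LOG environment variable at these groups, e.g.
 * something along the lines of "iem.e.l3.l4+iem_mem.e.l" to get decoder
 * mnemonics and memory read logging, where the group names are assumed to
 * follow LOG_GROUP_IEM and LOG_GROUP_IEM_MEM.
 */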
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * This will also invalidate TLB entries for any pages with active data
202 * breakpoints on them.
203 *
204 * @returns IEM_F_BRK_PENDING_XXX or zero.
205 * @param pVCpu The cross context virtual CPU structure of the
206 * calling thread.
207 *
208 * @note Don't call directly, use iemCalcExecDbgFlags instead.
209 */
210uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
211{
212 uint32_t fExec = 0;
213
214 /*
 215 * Helper for invalidating the data TLB for breakpoint addresses.
216 *
217 * This is to make sure any access to the page will always trigger a TLB
218 * load for as long as the breakpoint is enabled.
219 */
220#ifdef IEM_WITH_DATA_TLB
221# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
222 RTGCPTR uTagNoRev = (a_uValue); \
223 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
224 /** @todo do large page accounting */ \
225 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
226 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
227 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
228 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
229 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
230 } while (0)
231#else
232# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
233#endif
234
235 /*
236 * Process guest breakpoints.
237 */
238#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
239 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
240 { \
241 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
242 { \
243 case X86_DR7_RW_EO: \
244 fExec |= IEM_F_PENDING_BRK_INSTR; \
245 break; \
246 case X86_DR7_RW_WO: \
247 case X86_DR7_RW_RW: \
248 fExec |= IEM_F_PENDING_BRK_DATA; \
249 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
250 break; \
251 case X86_DR7_RW_IO: \
252 fExec |= IEM_F_PENDING_BRK_X86_IO; \
253 break; \
254 } \
255 } \
256 } while (0)
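/*
 * DR7 layout decoded by the macro above (standard x86 debug register layout):
 * bits 0..7 hold the L0/G0 .. L3/G3 enable pairs checked via X86_DR7_L_G(),
 * and bits 16 + iBp*4 hold the two R/W bits returned by X86_DR7_GET_RW():
 * 00b = instruction execution, 01b = data writes, 10b = I/O (with CR4.DE=1),
 * 11b = data reads and writes.
 */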
257
258 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
259 if (fGstDr7 & X86_DR7_ENABLED_MASK)
260 {
261/** @todo extract more details here to simplify matching later. */
262#ifdef IEM_WITH_DATA_TLB
263 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
264#endif
265 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
266 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
267 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
268 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
269 }
270
271 /*
272 * Process hypervisor breakpoints.
273 */
274 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
275 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
276 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
277 {
278/** @todo extract more details here to simplify matching later. */
279 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
280 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
281 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
282 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
283 }
284
285 return fExec;
286}
287
288
289/**
290 * Initializes the decoder state.
291 *
292 * iemReInitDecoder is mostly a copy of this function.
293 *
294 * @param pVCpu The cross context virtual CPU structure of the
295 * calling thread.
296 * @param fExecOpts Optional execution flags:
297 * - IEM_F_BYPASS_HANDLERS
298 * - IEM_F_X86_DISREGARD_LOCK
299 */
300DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
301{
302 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
303 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
312
313 /* Execution state: */
314 uint32_t fExec;
315 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
316
317 /* Decoder state: */
318 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
319 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
320 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
321 {
322 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
323 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
324 }
325 else
326 {
327 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
328 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
329 }
330 pVCpu->iem.s.fPrefixes = 0;
331 pVCpu->iem.s.uRexReg = 0;
332 pVCpu->iem.s.uRexB = 0;
333 pVCpu->iem.s.uRexIndex = 0;
334 pVCpu->iem.s.idxPrefix = 0;
335 pVCpu->iem.s.uVex3rdReg = 0;
336 pVCpu->iem.s.uVexLength = 0;
337 pVCpu->iem.s.fEvexStuff = 0;
338 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
339#ifdef IEM_WITH_CODE_TLB
340 pVCpu->iem.s.pbInstrBuf = NULL;
341 pVCpu->iem.s.offInstrNextByte = 0;
342 pVCpu->iem.s.offCurInstrStart = 0;
343# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
344 pVCpu->iem.s.offOpcode = 0;
345# endif
346# ifdef VBOX_STRICT
347 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
348 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
349 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
350 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
351# endif
352#else
353 pVCpu->iem.s.offOpcode = 0;
354 pVCpu->iem.s.cbOpcode = 0;
355#endif
356 pVCpu->iem.s.offModRm = 0;
357 pVCpu->iem.s.cActiveMappings = 0;
358 pVCpu->iem.s.iNextMapping = 0;
359 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
360
361#ifdef DBGFTRACE_ENABLED
362 switch (IEM_GET_CPU_MODE(pVCpu))
363 {
364 case IEMMODE_64BIT:
365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
366 break;
367 case IEMMODE_32BIT:
368 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
369 break;
370 case IEMMODE_16BIT:
371 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
372 break;
373 }
374#endif
375}
376
377
378/**
379 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
380 *
381 * This is mostly a copy of iemInitDecoder.
382 *
383 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
384 */
385DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
386{
387 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
396
397 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
398 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
399 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
400
401 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
402 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
403 pVCpu->iem.s.enmEffAddrMode = enmMode;
404 if (enmMode != IEMMODE_64BIT)
405 {
406 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
407 pVCpu->iem.s.enmEffOpSize = enmMode;
408 }
409 else
410 {
411 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
412 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
413 }
414 pVCpu->iem.s.fPrefixes = 0;
415 pVCpu->iem.s.uRexReg = 0;
416 pVCpu->iem.s.uRexB = 0;
417 pVCpu->iem.s.uRexIndex = 0;
418 pVCpu->iem.s.idxPrefix = 0;
419 pVCpu->iem.s.uVex3rdReg = 0;
420 pVCpu->iem.s.uVexLength = 0;
421 pVCpu->iem.s.fEvexStuff = 0;
422 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
423#ifdef IEM_WITH_CODE_TLB
424 if (pVCpu->iem.s.pbInstrBuf)
425 {
426 uint64_t off = (enmMode == IEMMODE_64BIT
427 ? pVCpu->cpum.GstCtx.rip
428 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
429 - pVCpu->iem.s.uInstrBufPc;
430 if (off < pVCpu->iem.s.cbInstrBufTotal)
431 {
432 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
433 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
434 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
435 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
436 else
437 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
438 }
439 else
440 {
441 pVCpu->iem.s.pbInstrBuf = NULL;
442 pVCpu->iem.s.offInstrNextByte = 0;
443 pVCpu->iem.s.offCurInstrStart = 0;
444 pVCpu->iem.s.cbInstrBuf = 0;
445 pVCpu->iem.s.cbInstrBufTotal = 0;
446 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
447 }
448 }
449 else
450 {
451 pVCpu->iem.s.offInstrNextByte = 0;
452 pVCpu->iem.s.offCurInstrStart = 0;
453 pVCpu->iem.s.cbInstrBuf = 0;
454 pVCpu->iem.s.cbInstrBufTotal = 0;
455# ifdef VBOX_STRICT
456 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
457# endif
458 }
459# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
460 pVCpu->iem.s.offOpcode = 0;
461# endif
462#else /* !IEM_WITH_CODE_TLB */
463 pVCpu->iem.s.cbOpcode = 0;
464 pVCpu->iem.s.offOpcode = 0;
465#endif /* !IEM_WITH_CODE_TLB */
466 pVCpu->iem.s.offModRm = 0;
467 Assert(pVCpu->iem.s.cActiveMappings == 0);
468 pVCpu->iem.s.iNextMapping = 0;
469 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
470 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
471
472#ifdef DBGFTRACE_ENABLED
473 switch (enmMode)
474 {
475 case IEMMODE_64BIT:
476 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
477 break;
478 case IEMMODE_32BIT:
479 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
480 break;
481 case IEMMODE_16BIT:
482 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
483 break;
484 }
485#endif
486}
487
488
489
490/**
 491 * Prefetches opcodes the first time when starting execution.
492 *
493 * @returns Strict VBox status code.
494 * @param pVCpu The cross context virtual CPU structure of the
495 * calling thread.
496 * @param fExecOpts Optional execution flags:
497 * - IEM_F_BYPASS_HANDLERS
498 * - IEM_F_X86_DISREGARD_LOCK
499 */
500static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
501{
502 iemInitDecoder(pVCpu, fExecOpts);
503
504#ifndef IEM_WITH_CODE_TLB
505 /*
506 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
507 *
508 * First translate CS:rIP to a physical address.
509 *
510 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
511 * all relevant bytes from the first page, as it ASSUMES it's only ever
512 * called for dealing with CS.LIM, page crossing and instructions that
513 * are too long.
514 */
515 uint32_t cbToTryRead;
516 RTGCPTR GCPtrPC;
517 if (IEM_IS_64BIT_CODE(pVCpu))
518 {
519 cbToTryRead = GUEST_PAGE_SIZE;
520 GCPtrPC = pVCpu->cpum.GstCtx.rip;
521 if (IEM_IS_CANONICAL(GCPtrPC))
522 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
523 else
524 return iemRaiseGeneralProtectionFault0(pVCpu);
525 }
526 else
527 {
528 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
529 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
530 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
531 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
532 else
533 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
534 if (cbToTryRead) { /* likely */ }
535 else /* overflowed */
536 {
537 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
538 cbToTryRead = UINT32_MAX;
539 }
540 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
541 Assert(GCPtrPC <= UINT32_MAX);
542 }
543
544 PGMPTWALKFAST WalkFast;
545 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
546 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
547 &WalkFast);
548 if (RT_SUCCESS(rc))
549 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
550 else
551 {
552 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
553# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
554/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
555 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
556 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
557 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
558# endif
559 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
560 }
561#if 0
562 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
563 else
564 {
565 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
566# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
567/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
568# error completely wrong
569 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
570 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
571# endif
572 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
573 }
574 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
575 else
576 {
577 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
578# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
579/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
580# error completely wrong.
581 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
582 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
583# endif
584 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
585 }
586#else
587 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
588 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
589#endif
590 RTGCPHYS const GCPhys = WalkFast.GCPhys;
591
592 /*
593 * Read the bytes at this address.
594 */
595 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
596 if (cbToTryRead > cbLeftOnPage)
597 cbToTryRead = cbLeftOnPage;
598 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
599 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
600
601 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
602 {
603 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
604 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
605 { /* likely */ }
606 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
607 {
608 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
 609 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
610 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
611 }
612 else
613 {
614 Log((RT_SUCCESS(rcStrict)
615 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
616 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
 617 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
618 return rcStrict;
619 }
620 }
621 else
622 {
623 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
624 if (RT_SUCCESS(rc))
625 { /* likely */ }
626 else
627 {
628 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
 629 GCPtrPC, GCPhys, cbToTryRead, rc));
630 return rc;
631 }
632 }
633 pVCpu->iem.s.cbOpcode = cbToTryRead;
634#endif /* !IEM_WITH_CODE_TLB */
635 return VINF_SUCCESS;
636}
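/*
 * Worked example for the non-TLB prefetch above (illustrative numbers only):
 * if CS:rIP translates to a linear address ending in 0xffa, cbLeftOnPage is
 * 6, so at most 6 opcode bytes are read here and iemOpcodeFetchMoreBytes has
 * to handle the page crossing should the instruction turn out to be longer.
 */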
637
638
639#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
640/**
641 * Helper for doing large page accounting at TLB load time.
642 */
643template<bool const a_fGlobal>
644DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)
645{
646 if (a_fGlobal)
647 pTlb->cTlbGlobalLargePageCurLoads++;
648 else
649 pTlb->cTlbNonGlobalLargePageCurLoads++;
650
651# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
652 RTGCPTR const idxBit = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + a_fGlobal;
653 ASMBitSet(pTlb->bmLargePage, idxBit);
654# endif
655
656 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
657 uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;
658 IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal
659 ? &pTlb->GlobalLargePageRange
660 : &pTlb->NonGlobalLargePageRange;
661 uTagNoRev &= ~(RTGCPTR)fMask;
662 if (uTagNoRev < pRange->uFirstTag)
663 pRange->uFirstTag = uTagNoRev;
664
665 uTagNoRev |= fMask;
666 if (uTagNoRev > pRange->uLastTag)
667 pRange->uLastTag = uTagNoRev;
668
669 RT_NOREF_PV(pVCpu);
670}
671#endif
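/*
 * Worked example for the range tracking above (illustrative values): with
 * 2MB pages, fMask = (_2M - 1) >> GUEST_PAGE_SHIFT = 0x1ff, so a load for
 * GCPtr 0x00200000 (uTagNoRev 0x200) extends the tracked range to cover
 * uFirstTag <= 0x200 and uLastTag >= 0x3ff, i.e. all 512 4K tags of that
 * large page.
 */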
672
673
674#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
675/**
676 * Worker for iemTlbInvalidateAll.
677 */
678template<bool a_fGlobal>
679DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
680{
681 if (!a_fGlobal)
682 pTlb->cTlsFlushes++;
683 else
684 pTlb->cTlsGlobalFlushes++;
685
686 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
687 if (RT_LIKELY(pTlb->uTlbRevision != 0))
688 { /* very likely */ }
689 else
690 {
691 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
692 pTlb->cTlbRevisionRollovers++;
693 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
694 while (i-- > 0)
695 pTlb->aEntries[i * 2].uTag = 0;
696 }
697
698 pTlb->cTlbNonGlobalLargePageCurLoads = 0;
699 pTlb->NonGlobalLargePageRange.uLastTag = 0;
700 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
701
702 if (a_fGlobal)
703 {
704 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
705 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
706 { /* very likely */ }
707 else
708 {
709 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
710 pTlb->cTlbRevisionRollovers++;
711 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
712 while (i-- > 0)
713 pTlb->aEntries[i * 2 + 1].uTag = 0;
714 }
715
716 pTlb->cTlbGlobalLargePageCurLoads = 0;
717 pTlb->GlobalLargePageRange.uLastTag = 0;
718 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
719 }
720}
721#endif
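/*
 * Note on the revision scheme used above: lookups only hit when
 * uTag == (uTagNoRev | uTlbRevision) (see e.g. the code TLB probing in
 * iemOpcodeFetchBytesJmp), so bumping the revision invalidates every entry
 * without touching the array; only on the rare rollover does the relevant
 * half of each entry pair have to be zeroed explicitly, as done above.
 */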
722
723
724/**
725 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
726 */
727template<bool a_fGlobal>
728DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
729{
730#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
731 Log10(("IEMTlbInvalidateAll\n"));
732
733# ifdef IEM_WITH_CODE_TLB
734 pVCpu->iem.s.cbInstrBufTotal = 0;
735 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
736 if (a_fGlobal)
737 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
738 else
739 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
740# endif
741
742# ifdef IEM_WITH_DATA_TLB
743 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
744 if (a_fGlobal)
745 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
746 else
747 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
748# endif
749#else
750 RT_NOREF(pVCpu);
751#endif
752}
753
754
755/**
 756 * Invalidates the non-global IEM TLB entries.
757 *
758 * This is called internally as well as by PGM when moving GC mappings.
759 *
760 * @param pVCpu The cross context virtual CPU structure of the calling
761 * thread.
762 */
763VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
764{
765 iemTlbInvalidateAll<false>(pVCpu);
766}
767
768
769/**
770 * Invalidates all the IEM TLB entries.
771 *
772 * This is called internally as well as by PGM when moving GC mappings.
773 *
774 * @param pVCpu The cross context virtual CPU structure of the calling
775 * thread.
776 */
777VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
778{
779 iemTlbInvalidateAll<true>(pVCpu);
780}
781
782
783#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
784
785/** @todo graduate this to cdefs.h or asm-mem.h. */
786# ifdef RT_ARCH_ARM64 /** @todo RT_CACHELINE_SIZE is wrong for M1 */
787# undef RT_CACHELINE_SIZE
788# define RT_CACHELINE_SIZE 128
789# endif
790
791# if defined(_MM_HINT_T0) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
792# define MY_PREFETCH(a_pvAddr) _mm_prefetch((const char *)(a_pvAddr), _MM_HINT_T0)
793# elif defined(_MSC_VER) && (defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32))
794# define MY_PREFETCH(a_pvAddr) __prefetch((a_pvAddr))
795# elif defined(__GNUC__) || RT_CLANG_HAS_FEATURE(__builtin_prefetch)
796# define MY_PREFETCH(a_pvAddr) __builtin_prefetch((a_pvAddr), 0 /*rw*/, 3 /*locality*/)
797# else
798# define MY_PREFETCH(a_pvAddr) ((void)0)
799# endif
800# if 0
801# undef MY_PREFETCH
802# define MY_PREFETCH(a_pvAddr) ((void)0)
803# endif
804
805/** @def MY_PREFETCH_64
806 * 64 byte prefetch hint, could be more depending on cache line size. */
807/** @def MY_PREFETCH_128
808 * 128 byte prefetch hint. */
809/** @def MY_PREFETCH_256
810 * 256 byte prefetch hint. */
811# if RT_CACHELINE_SIZE >= 128
812 /* 128 byte cache lines */
813# define MY_PREFETCH_64(a_pvAddr) MY_PREFETCH(a_pvAddr)
814# define MY_PREFETCH_128(a_pvAddr) MY_PREFETCH(a_pvAddr)
815# define MY_PREFETCH_256(a_pvAddr) do { \
816 MY_PREFETCH(a_pvAddr); \
817 MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \
818 } while (0)
819# else
820 /* 64 byte cache lines */
821# define MY_PREFETCH_64(a_pvAddr) MY_PREFETCH(a_pvAddr)
822# define MY_PREFETCH_128(a_pvAddr) do { \
823 MY_PREFETCH(a_pvAddr); \
824 MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \
825 } while (0)
826# define MY_PREFETCH_256(a_pvAddr) do { \
827 MY_PREFETCH(a_pvAddr); \
828 MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \
829 MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \
830 MY_PREFETCH((uint8_t const *)a_pvAddr + 192); \
831 } while (0)
832# endif
833
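/*
 * Layout note for the workers below (as set up by iemTlbLoadedLargePage):
 * when IEMTLB_WITH_LARGE_PAGE_BITMAP is defined, bmLargePage holds one bit
 * per TLB entry - even bits for the non-global entry of a pair, odd bits for
 * the global one - which is why the scan code masks with 0x5555... or
 * 0xaaaa... when only one half is of interest.
 */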
834template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>
835DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,
836 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT
837{
838 IEMTLBTRACE_LARGE_SCAN(pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb);
839 AssertCompile(IEMTLB_ENTRY_COUNT >= 16); /* prefetching + unroll assumption */
840
841 if (a_fGlobal)
842 pTlb->cTlbInvlPgLargeGlobal += 1;
843 if (a_fNonGlobal)
844 pTlb->cTlbInvlPgLargeNonGlobal += 1;
845
846 /*
847 * Set up the scan.
848 *
 849 * GCPtrTagMask: A 2MB page consists of 512 4K pages, so a 256-entry TLB will map
 850 * offset zero and offset 1MB to the same slot pair. Our GCPtrTag[Glob]
 851 * values are for the range 0-1MB, or slots 0-255. So, we construct a mask
 852 * that folds large page offsets 1MB-2MB into the 0-1MB range.
 853 *
 854 * For our example with 2MB pages and a 256-entry TLB: 0xfffffffffffffeff
 855 *
 856 * MY_PREFETCH: Hope that prefetching 256 bytes at a time is okay for
 857 * relevant host architectures.
858 */
859 /** @todo benchmark this code from the guest side. */
860 bool const fPartialScan = IEMTLB_ENTRY_COUNT > (a_f2MbLargePage ? 512 : 1024);
861#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
862 uintptr_t idxBitmap = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) / 64 : 0;
863 uintptr_t const idxBitmapEnd = fPartialScan ? idxBitmap + ((a_f2MbLargePage ? 512 : 1024) * 2) / 64
864 : IEMTLB_ENTRY_COUNT * 2 / 64;
865#else
866 uintptr_t idxEven = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;
867 MY_PREFETCH_256(&pTlb->aEntries[idxEven + !a_fNonGlobal]);
868 uintptr_t const idxEvenEnd = fPartialScan ? idxEven + ((a_f2MbLargePage ? 512 : 1024) * 2) : IEMTLB_ENTRY_COUNT * 2;
869#endif
870 RTGCPTR const GCPtrTagMask = fPartialScan ? ~(RTGCPTR)0
871 : ~(RTGCPTR)( (RT_BIT_32(a_f2MbLargePage ? 9 : 10) - 1U)
872 & ~(uint32_t)(RT_BIT_32(IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U));
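 /* Quick check of the 0xfffffffffffffeff example from the comment above,
    assuming IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO is 8 (the 256 entry setup):
    (RT_BIT_32(9) - 1) = 0x1ff, ~(uint32_t)(RT_BIT_32(8) - 1) = 0xffffff00,
    their AND is 0x100, and negating that as RTGCPTR gives the mask above,
    i.e. only tag bit 8 (the 1MB offset inside the 2MB page) is ignored. */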
873
874 /*
875 * Set cbInstrBufTotal to zero if GCPtrInstrBufPcTag is within any of the tag ranges.
876 * We make ASSUMPTIONS about IEMTLB_CALC_TAG_NO_REV here.
877 */
878 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
879 if ( !a_fDataTlb
880 && GCPtrInstrBufPcTag - GCPtrTag < (a_f2MbLargePage ? 512U : 1024U))
881 pVCpu->iem.s.cbInstrBufTotal = 0;
882
883 /*
884 * Combine TAG values with the TLB revisions.
885 */
886 RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;
887 if (a_fNonGlobal)
888 GCPtrTag |= pTlb->uTlbRevision;
889
890 /*
891 * Do the scanning.
892 */
893#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
894 uint64_t const bmMask = a_fGlobal && a_fNonGlobal ? UINT64_MAX
895 : a_fGlobal ? UINT64_C(0xaaaaaaaaaaaaaaaa) : UINT64_C(0x5555555555555555);
896 /* Scan bitmap entries (64 bits at the time): */
897 for (;;)
898 {
899# if 1
900 uint64_t bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;
901 if (bmEntry)
902 {
903 /* Scan the non-zero 64-bit value in groups of 8 bits: */
904 uint64_t bmToClear = 0;
905 uintptr_t idxEven = idxBitmap * 64;
906 uint32_t idxTag = 0;
907 for (;;)
908 {
909 if (bmEntry & 0xff)
910 {
911# define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \
912 if (a_fNonGlobal) \
913 { \
914 if (bmEntry & a_bmNonGlobal) \
915 { \
916 Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
917 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \
918 { \
919 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \
920 pTlb->aEntries[a_idxEvenIter].GCPhys, \
921 a_idxEvenIter, a_fDataTlb); \
922 pTlb->aEntries[a_idxEvenIter].uTag = 0; \
923 bmToClearSub8 |= a_bmNonGlobal; \
924 } \
925 } \
926 else \
927 Assert( !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
928 || (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \
929 != (GCPtrTag & IEMTLB_REVISION_MASK)); \
930 } \
931 if (a_fGlobal) \
932 { \
933 if (bmEntry & a_bmGlobal) \
934 { \
935 Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
936 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \
937 { \
938 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \
939 pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
940 a_idxEvenIter + 1, a_fDataTlb); \
941 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
942 bmToClearSub8 |= a_bmGlobal; \
943 } \
944 } \
945 else \
946 Assert( !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
947 || (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \
948 != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \
949 }
950 uint64_t bmToClearSub8 = 0;
951 ONE_PAIR(idxTag + 0, idxEven + 0, 0x01, 0x02)
952 ONE_PAIR(idxTag + 1, idxEven + 2, 0x04, 0x08)
953 ONE_PAIR(idxTag + 2, idxEven + 4, 0x10, 0x20)
954 ONE_PAIR(idxTag + 3, idxEven + 6, 0x40, 0x80)
955 bmToClear |= bmToClearSub8 << (idxTag * 2);
956# undef ONE_PAIR
957 }
958
959 /* advance to the next 8 bits. */
960 bmEntry >>= 8;
961 if (!bmEntry)
962 break;
963 idxEven += 8;
964 idxTag += 4;
965 }
966
967 /* Clear the large page flags we covered. */
968 pTlb->bmLargePage[idxBitmap] &= ~bmToClear;
969 }
970# else
971 uint64_t const bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;
972 if (bmEntry)
973 {
974 /* Scan the non-zero 64-bit value completely unrolled: */
975 uintptr_t const idxEven = idxBitmap * 64;
976 uint64_t bmToClear = 0;
977# define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \
978 if (a_fNonGlobal) \
979 { \
980 if (bmEntry & a_bmNonGlobal) \
981 { \
982 Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
983 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \
984 { \
985 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \
986 pTlb->aEntries[a_idxEvenIter].GCPhys, \
987 a_idxEvenIter, a_fDataTlb); \
988 pTlb->aEntries[a_idxEvenIter].uTag = 0; \
989 bmToClear |= a_bmNonGlobal; \
990 } \
991 } \
992 else \
 993 Assert( !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
994 || (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \
995 != (GCPtrTag & IEMTLB_REVISION_MASK)); \
996 } \
997 if (a_fGlobal) \
998 { \
999 if (bmEntry & a_bmGlobal) \
1000 { \
1001 Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
1002 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \
1003 { \
1004 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \
1005 pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
1006 a_idxEvenIter + 1, a_fDataTlb); \
1007 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
1008 bmToClear |= a_bmGlobal; \
1009 } \
1010 } \
1011 else \
1012 Assert( !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
1013 || (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \
1014 != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \
1015 } ((void)0)
1016# define FOUR_PAIRS(a_iByte, a_cShift) \
1017 ONE_PAIR(0 + a_iByte * 4, idxEven + 0 + a_iByte * 8, UINT64_C(0x01) << a_cShift, UINT64_C(0x02) << a_cShift); \
1018 ONE_PAIR(1 + a_iByte * 4, idxEven + 2 + a_iByte * 8, UINT64_C(0x04) << a_cShift, UINT64_C(0x08) << a_cShift); \
1019 ONE_PAIR(2 + a_iByte * 4, idxEven + 4 + a_iByte * 8, UINT64_C(0x10) << a_cShift, UINT64_C(0x20) << a_cShift); \
1020 ONE_PAIR(3 + a_iByte * 4, idxEven + 6 + a_iByte * 8, UINT64_C(0x40) << a_cShift, UINT64_C(0x80) << a_cShift)
1021 if (bmEntry & (uint32_t)UINT16_MAX)
1022 {
1023 FOUR_PAIRS(0, 0);
1024 FOUR_PAIRS(1, 8);
1025 }
1026 if (bmEntry & ((uint32_t)UINT16_MAX << 16))
1027 {
1028 FOUR_PAIRS(2, 16);
1029 FOUR_PAIRS(3, 24);
1030 }
1031 if (bmEntry & ((uint64_t)UINT16_MAX << 32))
1032 {
1033 FOUR_PAIRS(4, 32);
1034 FOUR_PAIRS(5, 40);
1035 }
 1036 if (bmEntry & ((uint64_t)UINT16_MAX << 48))
1037 {
1038 FOUR_PAIRS(6, 48);
1039 FOUR_PAIRS(7, 56);
1040 }
1041# undef FOUR_PAIRS
1042
1043 /* Clear the large page flags we covered. */
1044 pTlb->bmLargePage[idxBitmap] &= ~bmToClear;
1045 }
1046# endif
1047
1048 /* advance */
1049 idxBitmap++;
1050 if (idxBitmap >= idxBitmapEnd)
1051 break;
1052 if (a_fNonGlobal)
1053 GCPtrTag += 32;
1054 if (a_fGlobal)
1055 GCPtrTagGlob += 32;
1056 }
1057
1058#else /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */
1059
1060 for (; idxEven < idxEvenEnd; idxEven += 8)
1061 {
1062# define ONE_ITERATION(a_idxEvenIter) \
1063 if (a_fNonGlobal) \
1064 { \
1065 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == GCPtrTag) \
1066 { \
1067 if (pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
1068 { \
1069 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter].GCPhys, \
1070 a_idxEvenIter, a_fDataTlb); \
1071 pTlb->aEntries[a_idxEvenIter].uTag = 0; \
1072 } \
1073 } \
1074 GCPtrTag++; \
1075 } \
1076 \
1077 if (a_fGlobal) \
1078 { \
1079 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == GCPtrTagGlob) \
1080 { \
1081 if (pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
1082 { \
1083 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
1084 a_idxEvenIter + 1, a_fDataTlb); \
1085 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
1086 } \
1087 } \
1088 GCPtrTagGlob++; \
1089 }
1090 if (idxEven < idxEvenEnd - 4)
1091 MY_PREFETCH_256(&pTlb->aEntries[idxEven + 8 + !a_fNonGlobal]);
1092 ONE_ITERATION(idxEven)
1093 ONE_ITERATION(idxEven + 2)
1094 ONE_ITERATION(idxEven + 4)
1095 ONE_ITERATION(idxEven + 6)
1096# undef ONE_ITERATION
1097 }
1098#endif /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */
1099}
1100
1101template<bool const a_fDataTlb, bool const a_f2MbLargePage>
1102DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,
1103 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT
1104{
1105 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
1106
1107 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);
1108 if ( GCPtrTag >= pTlb->GlobalLargePageRange.uFirstTag
1109 && GCPtrTag <= pTlb->GlobalLargePageRange.uLastTag)
1110 {
1111 if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
1112 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
1113 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
1114 else
1115 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
1116 }
1117 else if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
1118 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
1119 {
1120 /* Large pages aren't as likely in the non-global TLB half. */
1121 IEMTLBTRACE_LARGE_SCAN(pVCpu, false, false, a_fDataTlb);
1122 }
1123 else
1124 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
1125}
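/*
 * Illustrative numbers for the alignment above: for 2MB pages the mask is
 * RT_BIT_64(21 - GUEST_PAGE_SHIFT) - 1 = 0x1ff, for 4MB (non-PAE) pages it
 * is 0x3ff, so GCPtrTag is rounded down to the first 4K tag of the large
 * page before being compared against the tracked uFirstTag/uLastTag ranges.
 */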
1126
1127template<bool const a_fDataTlb>
1128DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven) RT_NOEXCEPT
1129{
1130 pTlb->cTlbInvlPg += 1;
1131
1132 /*
1133 * Flush the entry pair.
1134 */
1135 if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
1136 {
1137 IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven].GCPhys, idxEven, a_fDataTlb);
1138 pTlb->aEntries[idxEven].uTag = 0;
1139 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
1140 pVCpu->iem.s.cbInstrBufTotal = 0;
1141 }
1142 if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
1143 {
1144 IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven + 1].GCPhys, idxEven + 1, a_fDataTlb);
1145 pTlb->aEntries[idxEven + 1].uTag = 0;
1146 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
1147 pVCpu->iem.s.cbInstrBufTotal = 0;
1148 }
1149
1150 /*
 1151 * If there are (or have been) large pages in the TLB, we must check if the
1152 * address being flushed may involve one of those, as then we'd have to
1153 * scan for entries relating to the same page and flush those as well.
1154 */
 1155# if 0 /** @todo do accurate counts of currently loaded large stuff and we can use those */
1156 if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)
1157# else
1158 if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)
1159# endif
1160 {
1161 RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);
1162 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1163 iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
1164 else
1165 iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
1166 }
1167}
1168
1169#endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */
1170
1171/**
1172 * Invalidates a page in the TLBs.
1173 *
1174 * @param pVCpu The cross context virtual CPU structure of the calling
1175 * thread.
1176 * @param GCPtr The address of the page to invalidate
1177 * @thread EMT(pVCpu)
1178 */
1179VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1180{
1181 IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
1182#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1183 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
1184 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
1185 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
1186 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
1187
1188# ifdef IEM_WITH_CODE_TLB
1189 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
1190# endif
1191# ifdef IEM_WITH_DATA_TLB
1192 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
1193# endif
1194#else
1195 NOREF(pVCpu); NOREF(GCPtr);
1196#endif
1197}
1198
1199
1200#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1201/**
 1202 * Invalidates both TLBs in slow fashion following a rollover.
1203 *
1204 * Worker for IEMTlbInvalidateAllPhysical,
1205 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
1206 * iemMemMapJmp and others.
1207 *
1208 * @thread EMT(pVCpu)
1209 */
1210static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
1211{
1212 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
1213 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
1214 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
1215
1216 unsigned i;
1217# ifdef IEM_WITH_CODE_TLB
1218 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1219 while (i-- > 0)
1220 {
1221 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1222 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
1223 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
1224 }
1225 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
1226 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1227# endif
1228# ifdef IEM_WITH_DATA_TLB
1229 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1230 while (i-- > 0)
1231 {
1232 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1233 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
1234 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
1235 }
1236 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
1237 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1238# endif
1239
1240}
1241#endif
1242
1243
1244/**
1245 * Invalidates the host physical aspects of the IEM TLBs.
1246 *
1247 * This is called internally as well as by PGM when moving GC mappings.
1248 *
1249 * @param pVCpu The cross context virtual CPU structure of the calling
1250 * thread.
1251 * @note Currently not used.
1252 */
1253VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1254{
1255#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
 1256 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1257 Log10(("IEMTlbInvalidateAllPhysical\n"));
1258
1259# ifdef IEM_WITH_CODE_TLB
1260 pVCpu->iem.s.cbInstrBufTotal = 0;
1261# endif
1262 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1263 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
1264 {
1265 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1266 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1267 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1268 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1269 }
1270 else
1271 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1272#else
1273 NOREF(pVCpu);
1274#endif
1275}
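/*
 * Note: the physical revision bumped above is consumed at lookup time (see
 * for instance iemOpcodeFetchBytesJmp), where a mismatch in the
 * IEMTLBE_F_PHYS_REV bits of fFlagsAndPhysRev forces a fresh
 * PGMPhysIemGCPhys2PtrNoLock call to re-resolve the mapping pointer and the
 * physical access flags.
 */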
1276
1277
1278/**
 1279 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1280 *
1281 * This is called internally as well as by PGM when moving GC mappings.
1282 *
1283 * @param pVM The cross context VM structure.
1284 * @param idCpuCaller The ID of the calling EMT if available to the caller,
1285 * otherwise NIL_VMCPUID.
1286 * @param enmReason The reason we're called.
1287 *
1288 * @remarks Caller holds the PGM lock.
1289 */
1290VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
1291{
1292#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1293 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
1294 if (pVCpuCaller)
1295 VMCPU_ASSERT_EMT(pVCpuCaller);
1296 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
1297
1298 VMCC_FOR_EACH_VMCPU(pVM)
1299 {
1300# ifdef IEM_WITH_CODE_TLB
1301 if (pVCpuCaller == pVCpu)
1302 pVCpu->iem.s.cbInstrBufTotal = 0;
1303# endif
1304
1305 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
1306 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
1307 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
1308 { /* likely */}
1309 else if (pVCpuCaller != pVCpu)
1310 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
1311 else
1312 {
1313 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1314 continue;
1315 }
1316 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1317 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1318
1319 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1320 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1321 }
1322 VMCC_FOR_EACH_VMCPU_END(pVM);
1323
1324#else
1325 RT_NOREF(pVM, idCpuCaller, enmReason);
1326#endif
1327}
1328
1329
1330/**
1331 * Flushes the prefetch buffer, light version.
1332 */
1333void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
1334{
1335#ifndef IEM_WITH_CODE_TLB
1336 pVCpu->iem.s.cbOpcode = cbInstr;
1337#else
1338 RT_NOREF(pVCpu, cbInstr);
1339#endif
1340}
1341
1342
1343/**
1344 * Flushes the prefetch buffer, heavy version.
1345 */
1346void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
1347{
1348#ifndef IEM_WITH_CODE_TLB
1349 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
1350#elif 1
1351 pVCpu->iem.s.cbInstrBufTotal = 0;
1352 RT_NOREF(cbInstr);
1353#else
1354 RT_NOREF(pVCpu, cbInstr);
1355#endif
1356}
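/*
 * Rule of thumb for the two flush variants above: the light one only matters
 * in non-code-TLB builds, where updating cbOpcode to the instruction length
 * is sufficient; the heavy one additionally drops the direct instruction
 * buffer mapping (cbInstrBufTotal = 0) so the next fetch goes back through
 * the code TLB.
 */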
1357
1358
1359
1360#ifdef IEM_WITH_CODE_TLB
1361
1362/**
 1363 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
 1364 * failure and jumps.
1365 *
1366 * We end up here for a number of reasons:
1367 * - pbInstrBuf isn't yet initialized.
 1368 * - Advancing beyond the buffer boundary (e.g. cross page).
1369 * - Advancing beyond the CS segment limit.
1370 * - Fetching from non-mappable page (e.g. MMIO).
1371 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
1372 *
1373 * @param pVCpu The cross context virtual CPU structure of the
1374 * calling thread.
1375 * @param pvDst Where to return the bytes.
1376 * @param cbDst Number of bytes to read. A value of zero is
1377 * allowed for initializing pbInstrBuf (the
1378 * recompiler does this). In this case it is best
1379 * to set pbInstrBuf to NULL prior to the call.
1380 */
1381void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
1382{
1383# ifdef IN_RING3
1384 for (;;)
1385 {
1386 Assert(cbDst <= 8);
1387 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1388
1389 /*
1390 * We might have a partial buffer match, deal with that first to make the
1391 * rest simpler. This is the first part of the cross page/buffer case.
1392 */
1393 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
1394 if (pbInstrBuf != NULL)
1395 {
1396 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
1397 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
1398 if (offBuf < cbInstrBuf)
1399 {
1400 Assert(offBuf + cbDst > cbInstrBuf);
1401 uint32_t const cbCopy = cbInstrBuf - offBuf;
1402 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
1403
1404 cbDst -= cbCopy;
1405 pvDst = (uint8_t *)pvDst + cbCopy;
1406 offBuf += cbCopy;
1407 }
1408 }
1409
1410 /*
1411 * Check segment limit, figuring how much we're allowed to access at this point.
1412 *
1413 * We will fault immediately if RIP is past the segment limit / in non-canonical
1414 * territory. If we do continue, there are one or more bytes to read before we
1415 * end up in trouble and we need to do that first before faulting.
1416 */
1417 RTGCPTR GCPtrFirst;
1418 uint32_t cbMaxRead;
1419 if (IEM_IS_64BIT_CODE(pVCpu))
1420 {
1421 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1422 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1423 { /* likely */ }
1424 else
1425 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1426 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1427 }
1428 else
1429 {
1430 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1431 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1432 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1433 { /* likely */ }
1434 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
1435 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1436 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1437 if (cbMaxRead != 0)
1438 { /* likely */ }
1439 else
1440 {
1441 /* Overflowed because address is 0 and limit is max. */
1442 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1443 cbMaxRead = X86_PAGE_SIZE;
1444 }
1445 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1446 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1447 if (cbMaxRead2 < cbMaxRead)
1448 cbMaxRead = cbMaxRead2;
1449 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1450 }
1451
1452 /*
1453 * Get the TLB entry for this piece of code.
1454 */
1455 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
1456 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
1457 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
1458 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
1459 {
1460 /* likely when executing lots of code, otherwise unlikely */
1461# ifdef IEM_WITH_TLB_STATISTICS
1462 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
1463# endif
1464 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1465
1466 /* Check TLB page table level access flags. */
1467 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1468 {
1469 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
1470 {
1471 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1472 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1473 }
1474 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1475 {
1476 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1477 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1478 }
1479 }
1480
1481 /* Look up the physical page info if necessary. */
1482 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1483 { /* not necessary */ }
1484 else
1485 {
1486 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1487 { /* likely */ }
1488 else
1489 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1490 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1491 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1492 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1493 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1494 }
1495 }
1496 else
1497 {
1498 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
1499
1500 /* This page table walking will set A bits as required by the access while performing the walk.
1501 ASSUMES these are set when the address is translated rather than on commit... */
1502 /** @todo testcase: check when A bits are actually set by the CPU for code. */
1503 PGMPTWALKFAST WalkFast;
1504 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
1505 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1506 &WalkFast);
1507 if (RT_SUCCESS(rc))
1508 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1509 else
1510 {
1511# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1512 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
 1513 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
1514# endif
1515 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1516 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
1517 }
1518
1519 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1520 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
1521 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
1522 {
1523 pTlbe--;
1524 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
1525 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1526 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1527# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
1528 else
1529 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
1530# endif
1531 }
1532 else
1533 {
1534 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
1535 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
1536 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1537 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1538# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
1539 else
1540 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
1541# endif
1542 }
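 /* Assemble the TLB flags: the inverted US/RW/D/A attribute bits line up with the
 IEMTLBE_F_PT_NO_xxx flags, the NX bit is shifted down to bit 0 (IEMTLBE_F_PT_NO_EXEC,
 see the AssertCompile above), and the large page info bit is carried over as-is. */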
1543 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1544 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
1545 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
1546 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1547 pTlbe->GCPhys = GCPhysPg;
1548 pTlbe->pbMappingR3 = NULL;
1549 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1550 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1551 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1552
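 /* The entry address parity tells which slot was just loaded: an even entry (aligned on
 an entry pair) was a non-global load, an odd entry a global one; trace accordingly. */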
1553 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
1554 IEMTLBTRACE_LOAD( pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
1555 else
1556 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
1557
1558 /* Resolve the physical address. */
1559 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1560 { /* likely */ }
1561 else
1562 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1563 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1564 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1565 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1566 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1567 }
1568
1569# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1570 /*
1571 * Try do a direct read using the pbMappingR3 pointer.
1572 * Note! Do not recheck the physical TLB revision number here, as we would
1573 * respond incorrectly to changes by taking the else path below. If someone
1574 * is updating pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should
1575 * be fine pretending we always won the race.
1576 */
1577 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1578 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1579 {
1580 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1581 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1582 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1583 {
1584 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1585 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1586 }
1587 else
1588 {
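 /* We are continuing an instruction whose first bytes came from the previous buffer
 (page); cbInstr is how many of them were already fetched, and together with the
 remaining cbDst bytes it must not exceed the architectural 15 byte instruction
 length limit, otherwise we raise #GP(0). */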
1589 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1590 if (cbInstr + (uint32_t)cbDst <= 15)
1591 {
1592 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1593 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1594 }
1595 else
1596 {
1597 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1598 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1599 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1600 }
1601 }
1602 if (cbDst <= cbMaxRead)
1603 {
1604 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1605 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1606
1607 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1608 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1609 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1610 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1611 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1612 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1613 else
1614 Assert(!pvDst);
1615 return;
1616 }
1617 pVCpu->iem.s.pbInstrBuf = NULL;
1618
1619 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1620 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1621 }
1622# else
1623# error "refactor as needed"
1624 /*
1625 * There is no special read handling, so we can read a bit more and
1626 * put it in the prefetch buffer.
1627 */
1628 if ( cbDst < cbMaxRead
1629 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1630 {
1631 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1632 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1633 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1634 { /* likely */ }
1635 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1636 {
1637 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1638 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1639 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1640 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1641 }
1642 else
1643 {
1644 Log((RT_SUCCESS(rcStrict)
1645 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1646 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1647 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1648 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1649 }
1650 }
1651# endif
1652 /*
1653 * Special read handling, so only read exactly what's needed.
1654 * This is a highly unlikely scenario.
1655 */
1656 else
1657 {
1658 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
1659
1660 /* Check instruction length. */
1661 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1662 if (RT_LIKELY(cbInstr + cbDst <= 15))
1663 { /* likely */ }
1664 else
1665 {
1666 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1667 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1668 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1669 }
1670
1671 /* Do the reading. */
1672 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1673 if (cbToRead > 0)
1674 {
1675 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1676 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1677 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1678 { /* likely */ }
1679 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1680 {
1681 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1682 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1683 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1684 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1685 }
1686 else
1687 {
1688 Log((RT_SUCCESS(rcStrict)
1689 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1690 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1691 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1692 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1693 }
1694 }
1695
1696 /* Update the state and probably return. */
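 /* Note that pbInstrBuf is left NULL below, so subsequent opcode fetches for this page
 take the slow path again rather than reading from a direct mapping. */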
1697 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1698 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1699 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1700
1701 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1702 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1703 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1704 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1705 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1706 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1707 pVCpu->iem.s.pbInstrBuf = NULL;
1708 if (cbToRead == cbDst)
1709 return;
1710 Assert(cbToRead == cbMaxRead);
1711 }
1712
1713 /*
1714 * More to read, loop.
1715 */
1716 cbDst -= cbMaxRead;
1717 pvDst = (uint8_t *)pvDst + cbMaxRead;
1718 }
1719# else /* !IN_RING3 */
1720 RT_NOREF(pvDst, cbDst);
1721 if (pvDst || cbDst)
1722 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1723# endif /* !IN_RING3 */
1724}
1725
1726#else /* !IEM_WITH_CODE_TLB */
1727
1728/**
1729 * Try to fetch at least @a cbMin more opcode bytes, raising the appropriate
1730 * exception if it fails.
1731 *
1732 * @returns Strict VBox status code.
1733 * @param pVCpu The cross context virtual CPU structure of the
1734 * calling thread.
1735 * @param cbMin The minimum number of bytes relative to offOpcode
1736 * that must be read.
1737 */
1738VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1739{
1740 /*
1741 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1742 *
1743 * First translate CS:rIP to a physical address.
1744 */
1745 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1746 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1747 uint8_t const cbLeft = cbOpcode - offOpcode;
1748 Assert(cbLeft < cbMin);
1749 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1750
1751 uint32_t cbToTryRead;
1752 RTGCPTR GCPtrNext;
1753 if (IEM_IS_64BIT_CODE(pVCpu))
1754 {
1755 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1756 if (!IEM_IS_CANONICAL(GCPtrNext))
1757 return iemRaiseGeneralProtectionFault0(pVCpu);
1758 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1759 }
1760 else
1761 {
1762 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1763 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1764 GCPtrNext32 += cbOpcode;
1765 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1766 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1767 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1768 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1769 if (!cbToTryRead) /* overflowed */
1770 {
1771 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1772 cbToTryRead = UINT32_MAX;
1773 /** @todo check out wrapping around the code segment. */
1774 }
1775 if (cbToTryRead < cbMin - cbLeft)
1776 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1777 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1778
1779 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1780 if (cbToTryRead > cbLeftOnPage)
1781 cbToTryRead = cbLeftOnPage;
1782 }
1783
1784 /* Restrict to opcode buffer space.
1785
1786 We're making ASSUMPTIONS here based on work done previously in
1787 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1788 be fetched in case of an instruction crossing two pages. */
1789 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1790 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1791 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1792 { /* likely */ }
1793 else
1794 {
1795 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1796 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1797 return iemRaiseGeneralProtectionFault0(pVCpu);
1798 }
1799
1800 PGMPTWALKFAST WalkFast;
1801 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1802 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1803 &WalkFast);
1804 if (RT_SUCCESS(rc))
1805 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1806 else
1807 {
1808 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1809#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1810 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1811 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1812#endif
1813 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1814 }
1815 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1816 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1817
1818 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1819 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1820
1821 /*
1822 * Read the bytes at this address.
1823 *
1824 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1825 * and since PATM should only patch the start of an instruction there
1826 * should be no need to check again here.
1827 */
1828 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1829 {
1830 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1831 cbToTryRead, PGMACCESSORIGIN_IEM);
1832 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1833 { /* likely */ }
1834 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1835 {
1836 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1837 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1838 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1839 }
1840 else
1841 {
1842 Log((RT_SUCCESS(rcStrict)
1843 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1844 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1845 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1846 return rcStrict;
1847 }
1848 }
1849 else
1850 {
1851 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1852 if (RT_SUCCESS(rc))
1853 { /* likely */ }
1854 else
1855 {
1856 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1857 return rc;
1858 }
1859 }
1860 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1861 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1862
1863 return VINF_SUCCESS;
1864}
1865
1866#endif /* !IEM_WITH_CODE_TLB */
1867#ifndef IEM_WITH_SETJMP
1868
1869/**
1870 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1871 *
1872 * @returns Strict VBox status code.
1873 * @param pVCpu The cross context virtual CPU structure of the
1874 * calling thread.
1875 * @param pb Where to return the opcode byte.
1876 */
1877VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1878{
1879 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1880 if (rcStrict == VINF_SUCCESS)
1881 {
1882 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1883 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1884 pVCpu->iem.s.offOpcode = offOpcode + 1;
1885 }
1886 else
1887 *pb = 0;
1888 return rcStrict;
1889}
1890
1891#else /* IEM_WITH_SETJMP */
1892
1893/**
1894 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1895 *
1896 * @returns The opcode byte.
1897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1898 */
1899uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1900{
1901# ifdef IEM_WITH_CODE_TLB
1902 uint8_t u8;
1903 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1904 return u8;
1905# else
1906 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1907 if (rcStrict == VINF_SUCCESS)
1908 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1909 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1910# endif
1911}
1912
1913#endif /* IEM_WITH_SETJMP */
1914
1915#ifndef IEM_WITH_SETJMP
1916
1917/**
1918 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1919 *
1920 * @returns Strict VBox status code.
1921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1922 * @param pu16 Where to return the opcode word.
1923 */
1924VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1925{
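 /* The (int8_t) cast below sign-extends the byte; assigning the result to the wider
 unsigned type preserves that bit pattern (same trick in the U32/U64 variants below). */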
1926 uint8_t u8;
1927 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1928 if (rcStrict == VINF_SUCCESS)
1929 *pu16 = (int8_t)u8;
1930 return rcStrict;
1931}
1932
1933
1934/**
1935 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1936 *
1937 * @returns Strict VBox status code.
1938 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1939 * @param pu32 Where to return the opcode dword.
1940 */
1941VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1942{
1943 uint8_t u8;
1944 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1945 if (rcStrict == VINF_SUCCESS)
1946 *pu32 = (int8_t)u8;
1947 return rcStrict;
1948}
1949
1950
1951/**
1952 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1953 *
1954 * @returns Strict VBox status code.
1955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1956 * @param pu64 Where to return the opcode qword.
1957 */
1958VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1959{
1960 uint8_t u8;
1961 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1962 if (rcStrict == VINF_SUCCESS)
1963 *pu64 = (int8_t)u8;
1964 return rcStrict;
1965}
1966
1967#endif /* !IEM_WITH_SETJMP */
1968
1969
1970#ifndef IEM_WITH_SETJMP
1971
1972/**
1973 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1974 *
1975 * @returns Strict VBox status code.
1976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1977 * @param pu16 Where to return the opcode word.
1978 */
1979VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1980{
1981 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1982 if (rcStrict == VINF_SUCCESS)
1983 {
1984 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1985# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1986 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1987# else
1988 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1989# endif
1990 pVCpu->iem.s.offOpcode = offOpcode + 2;
1991 }
1992 else
1993 *pu16 = 0;
1994 return rcStrict;
1995}
1996
1997#else /* IEM_WITH_SETJMP */
1998
1999/**
2000 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2001 *
2002 * @returns The opcode word.
2003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2004 */
2005uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2006{
2007# ifdef IEM_WITH_CODE_TLB
2008 uint16_t u16;
2009 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2010 return u16;
2011# else
2012 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2013 if (rcStrict == VINF_SUCCESS)
2014 {
2015 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2016 pVCpu->iem.s.offOpcode += 2;
2017# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2018 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2019# else
2020 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2021# endif
2022 }
2023 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
2024# endif
2025}
2026
2027#endif /* IEM_WITH_SETJMP */
2028
2029#ifndef IEM_WITH_SETJMP
2030
2031/**
2032 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2033 *
2034 * @returns Strict VBox status code.
2035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2036 * @param pu32 Where to return the opcode double word.
2037 */
2038VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
2039{
2040 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2041 if (rcStrict == VINF_SUCCESS)
2042 {
2043 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2044 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2045 pVCpu->iem.s.offOpcode = offOpcode + 2;
2046 }
2047 else
2048 *pu32 = 0;
2049 return rcStrict;
2050}
2051
2052
2053/**
2054 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2055 *
2056 * @returns Strict VBox status code.
2057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2058 * @param pu64 Where to return the opcode quad word.
2059 */
2060VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
2061{
2062 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2063 if (rcStrict == VINF_SUCCESS)
2064 {
2065 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2066 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2067 pVCpu->iem.s.offOpcode = offOpcode + 2;
2068 }
2069 else
2070 *pu64 = 0;
2071 return rcStrict;
2072}
2073
2074#endif /* !IEM_WITH_SETJMP */
2075
2076#ifndef IEM_WITH_SETJMP
2077
2078/**
2079 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2080 *
2081 * @returns Strict VBox status code.
2082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2083 * @param pu32 Where to return the opcode dword.
2084 */
2085VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
2086{
2087 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2088 if (rcStrict == VINF_SUCCESS)
2089 {
2090 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2091# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2092 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2093# else
2094 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2095 pVCpu->iem.s.abOpcode[offOpcode + 1],
2096 pVCpu->iem.s.abOpcode[offOpcode + 2],
2097 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2098# endif
2099 pVCpu->iem.s.offOpcode = offOpcode + 4;
2100 }
2101 else
2102 *pu32 = 0;
2103 return rcStrict;
2104}
2105
2106#else /* IEM_WITH_SETJMP */
2107
2108/**
2109 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2110 *
2111 * @returns The opcode dword.
2112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2113 */
2114uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2115{
2116# ifdef IEM_WITH_CODE_TLB
2117 uint32_t u32;
2118 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2119 return u32;
2120# else
2121 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2122 if (rcStrict == VINF_SUCCESS)
2123 {
2124 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2125 pVCpu->iem.s.offOpcode = offOpcode + 4;
2126# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2127 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2128# else
2129 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2130 pVCpu->iem.s.abOpcode[offOpcode + 1],
2131 pVCpu->iem.s.abOpcode[offOpcode + 2],
2132 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2133# endif
2134 }
2135 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
2136# endif
2137}
2138
2139#endif /* IEM_WITH_SETJMP */
2140
2141#ifndef IEM_WITH_SETJMP
2142
2143/**
2144 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2145 *
2146 * @returns Strict VBox status code.
2147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2148 * @param pu64 Where to return the opcode qword.
2149 */
2150VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
2151{
2152 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2153 if (rcStrict == VINF_SUCCESS)
2154 {
2155 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2156 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2157 pVCpu->iem.s.abOpcode[offOpcode + 1],
2158 pVCpu->iem.s.abOpcode[offOpcode + 2],
2159 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2160 pVCpu->iem.s.offOpcode = offOpcode + 4;
2161 }
2162 else
2163 *pu64 = 0;
2164 return rcStrict;
2165}
2166
2167
2168/**
2169 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2170 *
2171 * @returns Strict VBox status code.
2172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2173 * @param pu64 Where to return the opcode qword.
2174 */
2175VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
2176{
2177 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2178 if (rcStrict == VINF_SUCCESS)
2179 {
2180 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2181 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2182 pVCpu->iem.s.abOpcode[offOpcode + 1],
2183 pVCpu->iem.s.abOpcode[offOpcode + 2],
2184 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2185 pVCpu->iem.s.offOpcode = offOpcode + 4;
2186 }
2187 else
2188 *pu64 = 0;
2189 return rcStrict;
2190}
2191
2192#endif /* !IEM_WITH_SETJMP */
2193
2194#ifndef IEM_WITH_SETJMP
2195
2196/**
2197 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2198 *
2199 * @returns Strict VBox status code.
2200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2201 * @param pu64 Where to return the opcode qword.
2202 */
2203VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
2204{
2205 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2206 if (rcStrict == VINF_SUCCESS)
2207 {
2208 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2209# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2210 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2211# else
2212 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2213 pVCpu->iem.s.abOpcode[offOpcode + 1],
2214 pVCpu->iem.s.abOpcode[offOpcode + 2],
2215 pVCpu->iem.s.abOpcode[offOpcode + 3],
2216 pVCpu->iem.s.abOpcode[offOpcode + 4],
2217 pVCpu->iem.s.abOpcode[offOpcode + 5],
2218 pVCpu->iem.s.abOpcode[offOpcode + 6],
2219 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2220# endif
2221 pVCpu->iem.s.offOpcode = offOpcode + 8;
2222 }
2223 else
2224 *pu64 = 0;
2225 return rcStrict;
2226}
2227
2228#else /* IEM_WITH_SETJMP */
2229
2230/**
2231 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2232 *
2233 * @returns The opcode qword.
2234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2235 */
2236uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2237{
2238# ifdef IEM_WITH_CODE_TLB
2239 uint64_t u64;
2240 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2241 return u64;
2242# else
2243 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2244 if (rcStrict == VINF_SUCCESS)
2245 {
2246 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2247 pVCpu->iem.s.offOpcode = offOpcode + 8;
2248# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2249 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2250# else
2251 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2252 pVCpu->iem.s.abOpcode[offOpcode + 1],
2253 pVCpu->iem.s.abOpcode[offOpcode + 2],
2254 pVCpu->iem.s.abOpcode[offOpcode + 3],
2255 pVCpu->iem.s.abOpcode[offOpcode + 4],
2256 pVCpu->iem.s.abOpcode[offOpcode + 5],
2257 pVCpu->iem.s.abOpcode[offOpcode + 6],
2258 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2259# endif
2260 }
2261 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
2262# endif
2263}
2264
2265#endif /* IEM_WITH_SETJMP */
2266
2267
2268
2269/** @name Misc Worker Functions.
2270 * @{
2271 */
2272
2273/**
2274 * Gets the exception class for the specified exception vector.
2275 *
2276 * @returns The class of the specified exception.
2277 * @param uVector The exception vector.
2278 */
2279static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
2280{
2281 Assert(uVector <= X86_XCPT_LAST);
2282 switch (uVector)
2283 {
2284 case X86_XCPT_DE:
2285 case X86_XCPT_TS:
2286 case X86_XCPT_NP:
2287 case X86_XCPT_SS:
2288 case X86_XCPT_GP:
2289 case X86_XCPT_SX: /* AMD only */
2290 return IEMXCPTCLASS_CONTRIBUTORY;
2291
2292 case X86_XCPT_PF:
2293 case X86_XCPT_VE: /* Intel only */
2294 return IEMXCPTCLASS_PAGE_FAULT;
2295
2296 case X86_XCPT_DF:
2297 return IEMXCPTCLASS_DOUBLE_FAULT;
2298 }
2299 return IEMXCPTCLASS_BENIGN;
2300}
2301
2302
2303/**
2304 * Evaluates how to handle an exception caused during delivery of another event
2305 * (exception / interrupt).
2306 *
2307 * @returns How to handle the recursive exception.
2308 * @param pVCpu The cross context virtual CPU structure of the
2309 * calling thread.
2310 * @param fPrevFlags The flags of the previous event.
2311 * @param uPrevVector The vector of the previous event.
2312 * @param fCurFlags The flags of the current exception.
2313 * @param uCurVector The vector of the current exception.
2314 * @param pfXcptRaiseInfo Where to store additional information about the
2315 * exception condition. Optional.
2316 */
2317VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
2318 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
2319{
2320 /*
2321 * Only CPU exceptions can be raised while delivering other events, software interrupt
2322 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
2323 */
2324 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
2325 Assert(pVCpu); RT_NOREF(pVCpu);
2326 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
2327
2328 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
2329 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
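 /* The classification below follows the CPU's double fault rules: a page fault while
 delivering a page fault or contributory exception, or a contributory exception while
 delivering a contributory one, escalates to #DF; a contributory exception or page
 fault while delivering #DF means shutdown (triple fault); benign combinations simply
 deliver the current exception. */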
2330 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2331 {
2332 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
2333 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
2334 {
2335 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
2336 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
2337 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
2338 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
2339 {
2340 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2341 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
2342 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
2343 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
2344 uCurVector, pVCpu->cpum.GstCtx.cr2));
2345 }
2346 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2347 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
2348 {
2349 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2350 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
2351 }
2352 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
2353 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2354 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
2355 {
2356 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
2357 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
2358 }
2359 }
2360 else
2361 {
2362 if (uPrevVector == X86_XCPT_NMI)
2363 {
2364 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
2365 if (uCurVector == X86_XCPT_PF)
2366 {
2367 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
2368 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
2369 }
2370 }
2371 else if ( uPrevVector == X86_XCPT_AC
2372 && uCurVector == X86_XCPT_AC)
2373 {
2374 enmRaise = IEMXCPTRAISE_CPU_HANG;
2375 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
2376 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
2377 }
2378 }
2379 }
2380 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
2381 {
2382 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
2383 if (uCurVector == X86_XCPT_PF)
2384 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
2385 }
2386 else
2387 {
2388 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
2389 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
2390 }
2391
2392 if (pfXcptRaiseInfo)
2393 *pfXcptRaiseInfo = fRaiseInfo;
2394 return enmRaise;
2395}
2396
2397
2398/**
2399 * Enters the CPU shutdown state initiated by a triple fault or other
2400 * unrecoverable conditions.
2401 *
2402 * @returns Strict VBox status code.
2403 * @param pVCpu The cross context virtual CPU structure of the
2404 * calling thread.
2405 */
2406static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
2407{
2408 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2409 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
2410
2411 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
2412 {
2413 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
2414 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2415 }
2416
2417 RT_NOREF(pVCpu);
2418 return VINF_EM_TRIPLE_FAULT;
2419}
2420
2421
2422/**
2423 * Validates a new SS segment.
2424 *
2425 * @returns VBox strict status code.
2426 * @param pVCpu The cross context virtual CPU structure of the
2427 * calling thread.
2428 * @param NewSS The new SS selector.
2429 * @param uCpl The CPL to load the stack for.
2430 * @param pDesc Where to return the descriptor.
2431 */
2432static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
2433{
2434 /* Null selectors are not allowed (we're not called for dispatching
2435 interrupts with SS=0 in long mode). */
2436 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2437 {
2438 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2439 return iemRaiseTaskSwitchFault0(pVCpu);
2440 }
2441
2442 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2443 if ((NewSS & X86_SEL_RPL) != uCpl)
2444 {
2445 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2446 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2447 }
2448
2449 /*
2450 * Read the descriptor.
2451 */
2452 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2453 if (rcStrict != VINF_SUCCESS)
2454 return rcStrict;
2455
2456 /*
2457 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2458 */
2459 if (!pDesc->Legacy.Gen.u1DescType)
2460 {
2461 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2462 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2463 }
2464
2465 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2466 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2467 {
2468 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2469 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2470 }
2471 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2472 {
2473 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2474 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2475 }
2476
2477 /* Is it there? */
2478 /** @todo testcase: Is this checked before the canonical / limit check below? */
2479 if (!pDesc->Legacy.Gen.u1Present)
2480 {
2481 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2482 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2483 }
2484
2485 return VINF_SUCCESS;
2486}
2487
2488/** @} */
2489
2490
2491/** @name Raising Exceptions.
2492 *
2493 * @{
2494 */
2495
2496
2497/**
2498 * Loads the specified stack far pointer from the TSS.
2499 *
2500 * @returns VBox strict status code.
2501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2502 * @param uCpl The CPL to load the stack for.
2503 * @param pSelSS Where to return the new stack segment.
2504 * @param puEsp Where to return the new stack pointer.
2505 */
2506static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
2507{
2508 VBOXSTRICTRC rcStrict;
2509 Assert(uCpl < 4);
2510
2511 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2512 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
2513 {
2514 /*
2515 * 16-bit TSS (X86TSS16).
2516 */
2517 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2518 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2519 {
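 /* A 16-bit TSS stores the ring stacks as SP:SS word pairs starting at offset 2, so the
 32-bit read at uCpl * 4 + 2 yields SP in the low word and SS in the high word. */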
2520 uint32_t off = uCpl * 4 + 2;
2521 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2522 {
2523 /** @todo check actual access pattern here. */
2524 uint32_t u32Tmp = 0; /* gcc maybe... */
2525 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2526 if (rcStrict == VINF_SUCCESS)
2527 {
2528 *puEsp = RT_LOWORD(u32Tmp);
2529 *pSelSS = RT_HIWORD(u32Tmp);
2530 return VINF_SUCCESS;
2531 }
2532 }
2533 else
2534 {
2535 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2536 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2537 }
2538 break;
2539 }
2540
2541 /*
2542 * 32-bit TSS (X86TSS32).
2543 */
2544 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2545 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2546 {
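 /* A 32-bit TSS stores the ring stacks as 8-byte ESP:SS pairs starting at offset 4, so
 the 64-bit read at uCpl * 8 + 4 yields ESP in the low dword and SS in the high dword. */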
2547 uint32_t off = uCpl * 8 + 4;
2548 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2549 {
2550/** @todo check actual access pattern here. */
2551 uint64_t u64Tmp;
2552 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2553 if (rcStrict == VINF_SUCCESS)
2554 {
2555 *puEsp = u64Tmp & UINT32_MAX;
2556 *pSelSS = (RTSEL)(u64Tmp >> 32);
2557 return VINF_SUCCESS;
2558 }
2559 }
2560 else
2561 {
2562 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2563 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2564 }
2565 break;
2566 }
2567
2568 default:
2569 AssertFailed();
2570 rcStrict = VERR_IEM_IPE_4;
2571 break;
2572 }
2573
2574 *puEsp = 0; /* make gcc happy */
2575 *pSelSS = 0; /* make gcc happy */
2576 return rcStrict;
2577}
2578
2579
2580/**
2581 * Loads the specified stack pointer from the 64-bit TSS.
2582 *
2583 * @returns VBox strict status code.
2584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2585 * @param uCpl The CPL to load the stack for.
2586 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2587 * @param puRsp Where to return the new stack pointer.
2588 */
2589static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2590{
2591 Assert(uCpl < 4);
2592 Assert(uIst < 8);
2593 *puRsp = 0; /* make gcc happy */
2594
2595 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2596 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2597
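 /* uIst == 0 selects the RSPn field for the target CPL; a non-zero uIst selects one of
 the seven interrupt stack table entries (1-based, hence the uIst - 1 below). */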
2598 uint32_t off;
2599 if (uIst)
2600 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2601 else
2602 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2603 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2604 {
2605 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2606 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2607 }
2608
2609 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2610}
2611
2612
2613/**
2614 * Adjust the CPU state according to the exception being raised.
2615 *
2616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2617 * @param u8Vector The exception that has been raised.
2618 */
2619DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2620{
2621 switch (u8Vector)
2622 {
2623 case X86_XCPT_DB:
2624 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2625 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2626 break;
2627 /** @todo Read the AMD and Intel exception reference... */
2628 }
2629}
2630
2631
2632/**
2633 * Implements exceptions and interrupts for real mode.
2634 *
2635 * @returns VBox strict status code.
2636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2637 * @param cbInstr The number of bytes to offset rIP by in the return
2638 * address.
2639 * @param u8Vector The interrupt / exception vector number.
2640 * @param fFlags The flags.
2641 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2642 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2643 */
2644static VBOXSTRICTRC
2645iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2646 uint8_t cbInstr,
2647 uint8_t u8Vector,
2648 uint32_t fFlags,
2649 uint16_t uErr,
2650 uint64_t uCr2) RT_NOEXCEPT
2651{
2652 NOREF(uErr); NOREF(uCr2);
2653 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2654
2655 /*
2656 * Read the IDT entry.
2657 */
2658 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2659 {
2660 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2661 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2662 }
2663 RTFAR16 Idte;
2664 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2665 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2666 {
2667 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2668 return rcStrict;
2669 }
2670
2671#ifdef LOG_ENABLED
2672 /* If it is a software interrupt, try to decode it if logging is enabled and such. */
2673 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2674 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2675 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2676#endif
2677
2678 /*
2679 * Push the stack frame.
2680 */
2681 uint8_t bUnmapInfo;
2682 uint16_t *pu16Frame;
2683 uint64_t uNewRsp;
2684 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2685 if (rcStrict != VINF_SUCCESS)
2686 return rcStrict;
2687
2688 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2689#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2690 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2691 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2692 fEfl |= UINT16_C(0xf000);
2693#endif
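 /* Real-mode interrupt frame: FLAGS, CS and IP are pushed in that order, so with the
 stack growing down IP ends up at the lowest address (index 0 of the 6 byte frame).
 On 8086/80186 class CPUs bits 12-15 of the pushed FLAGS image read as ones, which
 is what the 0xf000 OR above emulates. */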
2694 pu16Frame[2] = (uint16_t)fEfl;
2695 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2696 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2697 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2698 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2699 return rcStrict;
2700
2701 /*
2702 * Load the vector address into cs:ip and make exception specific state
2703 * adjustments.
2704 */
2705 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2706 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2707 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2708 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2709 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2710 pVCpu->cpum.GstCtx.rip = Idte.off;
2711 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2712 IEMMISC_SET_EFL(pVCpu, fEfl);
2713
2714 /** @todo do we actually do this in real mode? */
2715 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2716 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2717
2718 /*
2719 * Deal with debug events that follows the exception and clear inhibit flags.
2720 */
2721 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2722 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2723 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2724 else
2725 {
2726 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
2727 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2728 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2729 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2730 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2731 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2732 return iemRaiseDebugException(pVCpu);
2733 }
2734
2735 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2736 so best leave them alone in case we're in a weird kind of real mode... */
2737
2738 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2739}
2740
2741
2742/**
2743 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2744 *
2745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2746 * @param pSReg Pointer to the segment register.
2747 */
2748DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2749{
2750 pSReg->Sel = 0;
2751 pSReg->ValidSel = 0;
2752 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2753 {
2754 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2755 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2756 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2757 }
2758 else
2759 {
2760 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2761 /** @todo check this on AMD-V */
2762 pSReg->u64Base = 0;
2763 pSReg->u32Limit = 0;
2764 }
2765}
2766
2767
2768/**
2769 * Loads a segment selector during a task switch in V8086 mode.
2770 *
2771 * @param pSReg Pointer to the segment register.
2772 * @param uSel The selector value to load.
2773 */
2774DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2775{
2776 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2777 pSReg->Sel = uSel;
2778 pSReg->ValidSel = uSel;
2779 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2780 pSReg->u64Base = uSel << 4;
2781 pSReg->u32Limit = 0xffff;
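 /* 0xf3 = present, DPL 3, data segment, read/write, accessed - the fixed attribute
 value V8086 mode segments get (see the Intel spec reference above). */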
2782 pSReg->Attr.u = 0xf3;
2783}
2784
2785
2786/**
2787 * Loads a segment selector during a task switch in protected mode.
2788 *
2789 * In this task switch scenario, we would throw \#TS exceptions rather than
2790 * \#GPs.
2791 *
2792 * @returns VBox strict status code.
2793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2794 * @param pSReg Pointer to the segment register.
2795 * @param uSel The new selector value.
2796 *
2797 * @remarks This does _not_ handle CS or SS.
2798 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2799 */
2800static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2801{
2802 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2803
2804 /* Null data selector. */
2805 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2806 {
2807 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2808 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2809 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2810 return VINF_SUCCESS;
2811 }
2812
2813 /* Fetch the descriptor. */
2814 IEMSELDESC Desc;
2815 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2816 if (rcStrict != VINF_SUCCESS)
2817 {
2818 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2819 VBOXSTRICTRC_VAL(rcStrict)));
2820 return rcStrict;
2821 }
2822
2823 /* Must be a data segment or readable code segment. */
2824 if ( !Desc.Legacy.Gen.u1DescType
2825 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2826 {
2827 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2828 Desc.Legacy.Gen.u4Type));
2829 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2830 }
2831
2832 /* Check privileges for data segments and non-conforming code segments. */
2833 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2834 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2835 {
2836 /* The RPL and the new CPL must be less than or equal to the DPL. */
2837 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2838 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2839 {
2840 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2841 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2842 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2843 }
2844 }
2845
2846 /* Is it there? */
2847 if (!Desc.Legacy.Gen.u1Present)
2848 {
2849 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2850 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2851 }
2852
2853 /* The base and limit. */
2854 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2855 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2856
2857 /*
2858 * Ok, everything checked out fine. Now set the accessed bit before
2859 * committing the result into the registers.
2860 */
2861 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2862 {
2863 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2864 if (rcStrict != VINF_SUCCESS)
2865 return rcStrict;
2866 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2867 }
2868
2869 /* Commit */
2870 pSReg->Sel = uSel;
2871 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2872 pSReg->u32Limit = cbLimit;
2873 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2874 pSReg->ValidSel = uSel;
2875 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2876 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2877 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2878
2879 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2880 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2881 return VINF_SUCCESS;
2882}
2883
2884
2885/**
2886 * Performs a task switch.
2887 *
2888 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2889 * caller is responsible for performing the necessary checks (like DPL, TSS
2890 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2891 * reference for JMP, CALL, IRET.
2892 *
2893 * If the task switch is due to a software interrupt or hardware exception,
2894 * the caller is responsible for validating the TSS selector and descriptor. See
2895 * Intel Instruction reference for INT n.
2896 *
2897 * @returns VBox strict status code.
2898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2899 * @param enmTaskSwitch The cause of the task switch.
2900 * @param uNextEip The EIP effective after the task switch.
2901 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2902 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2903 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2904 * @param SelTss The TSS selector of the new task.
2905 * @param pNewDescTss Pointer to the new TSS descriptor.
2906 */
2907VBOXSTRICTRC
2908iemTaskSwitch(PVMCPUCC pVCpu,
2909 IEMTASKSWITCH enmTaskSwitch,
2910 uint32_t uNextEip,
2911 uint32_t fFlags,
2912 uint16_t uErr,
2913 uint64_t uCr2,
2914 RTSEL SelTss,
2915 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2916{
2917 Assert(!IEM_IS_REAL_MODE(pVCpu));
2918 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2919 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2920
2921 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2922 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2923 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2924 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2925 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2926
2927 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2928 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2929
2930 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2931 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2932
2933 /* Update CR2 in case it's a page-fault. */
2934 /** @todo This should probably be done much earlier in IEM/PGM. See
2935 * @bugref{5653#c49}. */
2936 if (fFlags & IEM_XCPT_FLAGS_CR2)
2937 pVCpu->cpum.GstCtx.cr2 = uCr2;
2938
2939 /*
2940 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2941 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2942 */
2943 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2944 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2945 if (uNewTssLimit < uNewTssLimitMin)
2946 {
2947 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2948 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2949 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2950 }
2951
2952 /*
2953 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2954 * The new TSS must have been read and validated (DPL, limits etc.) before a
2955 * task-switch VM-exit commences.
2956 *
2957 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2958 */
2959 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2960 {
2961 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2962 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2963 }
2964
2965 /*
2966 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2967 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2968 */
2969 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2970 {
2971 uint64_t const uExitInfo1 = SelTss;
2972 uint64_t uExitInfo2 = uErr;
2973 switch (enmTaskSwitch)
2974 {
2975 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2976 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2977 default: break;
2978 }
2979 if (fFlags & IEM_XCPT_FLAGS_ERR)
2980 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2981 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2982 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2983
2984 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2985 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2986 RT_NOREF2(uExitInfo1, uExitInfo2);
2987 }
2988
2989 /*
2990 * Check the current TSS limit. The last written byte to the current TSS during the
2991 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2992 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2993 *
2994 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2995 * end up with smaller than "legal" TSS limits.
2996 */
2997 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2998 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2999 if (uCurTssLimit < uCurTssLimitMin)
3000 {
3001 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
3002 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
3003 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
3004 }
3005
3006 /*
3007 * Verify that the new TSS can be accessed and map it. Map only the required contents
3008 * and not the entire TSS.
3009 */
3010 uint8_t bUnmapInfoNewTss;
3011 void *pvNewTss;
3012 uint32_t const cbNewTss = uNewTssLimitMin + 1;
3013 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
3014 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3015 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3016 * not perform correct translation if this happens. See Intel spec. 7.2.1
3017 * "Task-State Segment". */
3018 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
3019/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
3020 * Consider wrapping the remainder into a function for simpler cleanup. */
3021 if (rcStrict != VINF_SUCCESS)
3022 {
3023 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
3024 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
3025 return rcStrict;
3026 }
3027
3028 /*
3029 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3030 */
3031 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
3032 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3033 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3034 {
3035 uint8_t bUnmapInfoDescCurTss;
3036 PX86DESC pDescCurTss;
3037 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
3038 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
3039 if (rcStrict != VINF_SUCCESS)
3040 {
3041 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3042 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3043 return rcStrict;
3044 }
3045
3046 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3047 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
3048 if (rcStrict != VINF_SUCCESS)
3049 {
3050 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3051 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3052 return rcStrict;
3053 }
3054
3055 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3056 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3057 {
3058 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3059 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3060 fEFlags &= ~X86_EFL_NT;
3061 }
3062 }
3063
3064 /*
3065 * Save the CPU state into the current TSS.
3066 */
3067 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
3068 if (GCPtrNewTss == GCPtrCurTss)
3069 {
3070 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
3071 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3072 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
3073 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
3074 pVCpu->cpum.GstCtx.ldtr.Sel));
3075 }
3076 if (fIsNewTss386)
3077 {
3078 /*
3079 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3080 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3081 */
3082 uint8_t bUnmapInfoCurTss32;
3083 void *pvCurTss32;
3084 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
3085 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
3086 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3087 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
3088 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
3089 if (rcStrict != VINF_SUCCESS)
3090 {
3091 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
3092 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
3093 return rcStrict;
3094 }
3095
3096 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
3097 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
3098 pCurTss32->eip = uNextEip;
3099 pCurTss32->eflags = fEFlags;
3100 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
3101 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
3102 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
3103 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
3104 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
3105 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
3106 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
3107 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
3108 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
3109 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
3110 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
3111 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
3112 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
3113 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
3114
3115 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
3116 if (rcStrict != VINF_SUCCESS)
3117 {
3118 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3119 VBOXSTRICTRC_VAL(rcStrict)));
3120 return rcStrict;
3121 }
3122 }
3123 else
3124 {
3125 /*
3126 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3127 */
3128 uint8_t bUnmapInfoCurTss16;
3129 void *pvCurTss16;
3130 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
3131 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
3132 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3133 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
3134 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
3135 if (rcStrict != VINF_SUCCESS)
3136 {
3137 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
3138 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
3139 return rcStrict;
3140 }
3141
3142 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
3143 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
3144 pCurTss16->ip = uNextEip;
3145 pCurTss16->flags = (uint16_t)fEFlags;
3146 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
3147 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
3148 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
3149 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
3150 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
3151 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
3152 pCurTss16->si = pVCpu->cpum.GstCtx.si;
3153 pCurTss16->di = pVCpu->cpum.GstCtx.di;
3154 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
3155 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
3156 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
3157 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
3158
3159 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
3160 if (rcStrict != VINF_SUCCESS)
3161 {
3162 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3163 VBOXSTRICTRC_VAL(rcStrict)));
3164 return rcStrict;
3165 }
3166 }
3167
3168 /*
3169 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3170 */
3171 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3172 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3173 {
3174 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3175 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
3176 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
3177 }
3178
3179 /*
3180 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3181 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3182 */
3183 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3184 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3185 bool fNewDebugTrap;
3186 if (fIsNewTss386)
3187 {
3188 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
3189 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
3190 uNewEip = pNewTss32->eip;
3191 uNewEflags = pNewTss32->eflags;
3192 uNewEax = pNewTss32->eax;
3193 uNewEcx = pNewTss32->ecx;
3194 uNewEdx = pNewTss32->edx;
3195 uNewEbx = pNewTss32->ebx;
3196 uNewEsp = pNewTss32->esp;
3197 uNewEbp = pNewTss32->ebp;
3198 uNewEsi = pNewTss32->esi;
3199 uNewEdi = pNewTss32->edi;
3200 uNewES = pNewTss32->es;
3201 uNewCS = pNewTss32->cs;
3202 uNewSS = pNewTss32->ss;
3203 uNewDS = pNewTss32->ds;
3204 uNewFS = pNewTss32->fs;
3205 uNewGS = pNewTss32->gs;
3206 uNewLdt = pNewTss32->selLdt;
3207 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
3208 }
3209 else
3210 {
3211 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
3212 uNewCr3 = 0;
3213 uNewEip = pNewTss16->ip;
3214 uNewEflags = pNewTss16->flags;
3215 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
3216 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
3217 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
3218 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
3219 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
3220 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
3221 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
3222 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
3223 uNewES = pNewTss16->es;
3224 uNewCS = pNewTss16->cs;
3225 uNewSS = pNewTss16->ss;
3226 uNewDS = pNewTss16->ds;
3227 uNewFS = 0;
3228 uNewGS = 0;
3229 uNewLdt = pNewTss16->selLdt;
3230 fNewDebugTrap = false;
3231 }
3232
3233 if (GCPtrNewTss == GCPtrCurTss)
3234 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3235 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3236
3237 /*
3238 * We're done accessing the new TSS.
3239 */
3240 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
3241 if (rcStrict != VINF_SUCCESS)
3242 {
3243 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3244 return rcStrict;
3245 }
3246
3247 /*
3248 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3249 */
3250 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3251 {
3252 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
3253 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
3254 if (rcStrict != VINF_SUCCESS)
3255 {
3256 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3257 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3258 return rcStrict;
3259 }
3260
3261 /* Check that the descriptor indicates the new TSS is available (not busy). */
3262 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3263 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3264 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
3265
3266 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3267 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
3268 if (rcStrict != VINF_SUCCESS)
3269 {
3270 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3271 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3272 return rcStrict;
3273 }
3274 }
3275
3276 /*
3277 * From this point on, we're technically in the new task. We will defer exceptions
3278 * until the completion of the task switch but before executing any instructions in the new task.
3279 */
3280 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
3281 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
3282 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3283 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
3284 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
3285 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
3286 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3287
3288 /* Set the busy bit in TR. */
3289 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3290
3291 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3292 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3293 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3294 {
3295 uNewEflags |= X86_EFL_NT;
3296 }
3297
3298 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3299 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
3300 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3301
3302 pVCpu->cpum.GstCtx.eip = uNewEip;
3303 pVCpu->cpum.GstCtx.eax = uNewEax;
3304 pVCpu->cpum.GstCtx.ecx = uNewEcx;
3305 pVCpu->cpum.GstCtx.edx = uNewEdx;
3306 pVCpu->cpum.GstCtx.ebx = uNewEbx;
3307 pVCpu->cpum.GstCtx.esp = uNewEsp;
3308 pVCpu->cpum.GstCtx.ebp = uNewEbp;
3309 pVCpu->cpum.GstCtx.esi = uNewEsi;
3310 pVCpu->cpum.GstCtx.edi = uNewEdi;
3311
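    /* Only the flag bits covered by X86_EFL_LIVE_MASK (the architecturally defined ones)
       are taken over from the TSS image; the reserved always-one bit (bit 1) is forced set. */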
3312 uNewEflags &= X86_EFL_LIVE_MASK;
3313 uNewEflags |= X86_EFL_RA1_MASK;
3314 IEMMISC_SET_EFL(pVCpu, uNewEflags);
3315
3316 /*
3317 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3318 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3319 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3320 */
3321 pVCpu->cpum.GstCtx.es.Sel = uNewES;
3322 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
3323
3324 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3325 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
3326
3327 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3328 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
3329
3330 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
3331 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
3332
3333 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
3334 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
3335
3336 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
3337 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
3338 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3339
3340 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
3341 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3342 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
3343 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3344
3345 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3346 {
3347 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
3348 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
3349 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
3350 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
3351 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
3352 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
3353 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3354 }
3355
3356 /*
3357 * Switch CR3 for the new task.
3358 */
3359 if ( fIsNewTss386
3360 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
3361 {
3362 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3363 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3364 AssertRCSuccessReturn(rc, rc);
3365
3366 /* Inform PGM. */
3367 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
3368 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
3369 AssertRCReturn(rc, rc);
3370 /* ignore informational status codes */
3371
3372 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3373 }
3374
3375 /*
3376 * Switch LDTR for the new task.
3377 */
3378 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3379 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
3380 else
3381 {
3382 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3383
3384 IEMSELDESC DescNewLdt;
3385 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3386 if (rcStrict != VINF_SUCCESS)
3387 {
3388 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3389 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3390 return rcStrict;
3391 }
3392 if ( !DescNewLdt.Legacy.Gen.u1Present
3393 || DescNewLdt.Legacy.Gen.u1DescType
3394 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3395 {
3396 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3397 uNewLdt, DescNewLdt.Legacy.u));
3398 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3399 }
3400
3401 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
3402 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3403 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3404 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3405 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3406 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3407 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3408 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
3409 }
3410
3411 IEMSELDESC DescSS;
3412 if (IEM_IS_V86_MODE(pVCpu))
3413 {
3414 IEM_SET_CPL(pVCpu, 3);
3415 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
3416 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
3417 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
3418 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
3419 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
3420 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
3421
3422 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
3423 DescSS.Legacy.u = 0;
3424 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
3425 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
3426 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
3427 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
3428 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
3429 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3430 DescSS.Legacy.Gen.u2Dpl = 3;
3431 }
3432 else
3433 {
3434 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
3435
3436 /*
3437 * Load the stack segment for the new task.
3438 */
3439 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3440 {
3441 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3442 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3443 }
3444
3445 /* Fetch the descriptor. */
3446 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3447 if (rcStrict != VINF_SUCCESS)
3448 {
3449 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3450 VBOXSTRICTRC_VAL(rcStrict)));
3451 return rcStrict;
3452 }
3453
3454 /* SS must be a data segment and writable. */
3455 if ( !DescSS.Legacy.Gen.u1DescType
3456 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3457 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3458 {
3459 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3460 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3461 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3462 }
3463
3464 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3465 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3466 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3467 {
3468 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3469 uNewCpl));
3470 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3471 }
3472
3473 /* Is it there? */
3474 if (!DescSS.Legacy.Gen.u1Present)
3475 {
3476 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3477 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3478 }
3479
3480 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3481 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3482
3483 /* Set the accessed bit before committing the result into SS. */
3484 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3485 {
3486 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3487 if (rcStrict != VINF_SUCCESS)
3488 return rcStrict;
3489 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3490 }
3491
3492 /* Commit SS. */
3493 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3494 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3495 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3496 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
3497 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
3498 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3499 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
3500
3501 /* CPL has changed, update IEM before loading rest of segments. */
3502 IEM_SET_CPL(pVCpu, uNewCpl);
3503
3504 /*
3505 * Load the data segments for the new task.
3506 */
3507 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
3508 if (rcStrict != VINF_SUCCESS)
3509 return rcStrict;
3510 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
3511 if (rcStrict != VINF_SUCCESS)
3512 return rcStrict;
3513 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
3514 if (rcStrict != VINF_SUCCESS)
3515 return rcStrict;
3516 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
3517 if (rcStrict != VINF_SUCCESS)
3518 return rcStrict;
3519
3520 /*
3521 * Load the code segment for the new task.
3522 */
3523 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3524 {
3525 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3526 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3527 }
3528
3529 /* Fetch the descriptor. */
3530 IEMSELDESC DescCS;
3531 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
3532 if (rcStrict != VINF_SUCCESS)
3533 {
3534 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3535 return rcStrict;
3536 }
3537
3538 /* CS must be a code segment. */
3539 if ( !DescCS.Legacy.Gen.u1DescType
3540 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3541 {
3542 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3543 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3544 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3545 }
3546
3547 /* For conforming CS, DPL must be less than or equal to the RPL. */
3548 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3549 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3550 {
3551 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3552 DescCS.Legacy.Gen.u2Dpl));
3553 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3554 }
3555
3556 /* For non-conforming CS, DPL must match RPL. */
3557 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3558 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3559 {
3560 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3561 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3562 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3563 }
3564
3565 /* Is it there? */
3566 if (!DescCS.Legacy.Gen.u1Present)
3567 {
3568 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3569 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3570 }
3571
3572 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3573 u64Base = X86DESC_BASE(&DescCS.Legacy);
3574
3575 /* Set the accessed bit before committing the result into CS. */
3576 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3577 {
3578 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3579 if (rcStrict != VINF_SUCCESS)
3580 return rcStrict;
3581 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3582 }
3583
3584 /* Commit CS. */
3585 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3586 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3587 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3588 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3589 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3590 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3591 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3592 }
3593
3594 /* Make sure the CPU mode is correct. */
3595 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3596 if (fExecNew != pVCpu->iem.s.fExec)
3597 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3598 pVCpu->iem.s.fExec = fExecNew;
3599
3600 /** @todo Debug trap. */
3601 if (fIsNewTss386 && fNewDebugTrap)
3602 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3603
3604 /*
3605 * Construct the error code masks based on what caused this task switch.
3606 * See Intel Instruction reference for INT.
3607 */
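    /* uExt becomes the EXT bit (bit 0) of any error code raised below: set for events
       external to the program (hardware interrupts, exceptions, ICEBP), clear for a
       plain software INT n. */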
3608 uint16_t uExt;
3609 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3610 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3611 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3612 uExt = 1;
3613 else
3614 uExt = 0;
3615
3616 /*
3617 * Push any error code onto the new stack.
3618 */
3619 if (fFlags & IEM_XCPT_FLAGS_ERR)
3620 {
3621 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3622 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3623 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3624
3625 /* Check that there is sufficient space on the stack. */
3626 /** @todo Factor out segment limit checking for normal/expand down segments
3627 * into a separate function. */
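        /* For a normal segment the valid offsets are [0..limit]; for an expand-down
           segment they are (limit..0xFFFF] (or ..0xFFFFFFFF when D/B is set), which is
           what the two branches below check. */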
3628 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3629 {
3630 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3631 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3632 {
3633 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3634 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3635 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3636 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3637 }
3638 }
3639 else
3640 {
3641 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3642 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3643 {
3644 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3645 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3646 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3647 }
3648 }
3649
3650
3651 if (fIsNewTss386)
3652 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3653 else
3654 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3655 if (rcStrict != VINF_SUCCESS)
3656 {
3657 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3658 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3659 return rcStrict;
3660 }
3661 }
3662
3663 /* Check the new EIP against the new CS limit. */
3664 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3665 {
3666 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3667 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3668 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3669 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3670 }
3671
3672 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3673 pVCpu->cpum.GstCtx.ss.Sel));
3674 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3675}
3676
3677
3678/**
3679 * Implements exceptions and interrupts for protected mode.
3680 *
3681 * @returns VBox strict status code.
3682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3683 * @param cbInstr The number of bytes to offset rIP by in the return
3684 * address.
3685 * @param u8Vector The interrupt / exception vector number.
3686 * @param fFlags The flags.
3687 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3688 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3689 */
3690static VBOXSTRICTRC
3691iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3692 uint8_t cbInstr,
3693 uint8_t u8Vector,
3694 uint32_t fFlags,
3695 uint16_t uErr,
3696 uint64_t uCr2) RT_NOEXCEPT
3697{
3698 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3699
3700 /*
3701 * Read the IDT entry.
3702 */
3703 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3704 {
3705 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3706 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3707 }
3708 X86DESC Idte;
3709 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3710 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3711 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3712 {
3713 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3714 return rcStrict;
3715 }
3716 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3717 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3718 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3719 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3720
3721 /*
3722 * Check the descriptor type, DPL and such.
3723 * ASSUMES this is done in the same order as described for call-gate calls.
3724 */
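    /* Summary of the gate dispatch below: interrupt gates additionally clear IF,
       trap gates leave IF alone, task gates hand the event over to a TSS-based task
       switch, and 286-style gates force a 16-bit stack frame. */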
3725 if (Idte.Gate.u1DescType)
3726 {
3727 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3728 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3729 }
3730 bool fTaskGate = false;
3731 uint8_t f32BitGate = true;
3732 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3733 switch (Idte.Gate.u4Type)
3734 {
3735 case X86_SEL_TYPE_SYS_UNDEFINED:
3736 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3737 case X86_SEL_TYPE_SYS_LDT:
3738 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3739 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3740 case X86_SEL_TYPE_SYS_UNDEFINED2:
3741 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3742 case X86_SEL_TYPE_SYS_UNDEFINED3:
3743 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3744 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3745 case X86_SEL_TYPE_SYS_UNDEFINED4:
3746 {
3747 /** @todo check what actually happens when the type is wrong...
3748 * esp. call gates. */
3749 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3750 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3751 }
3752
3753 case X86_SEL_TYPE_SYS_286_INT_GATE:
3754 f32BitGate = false;
3755 RT_FALL_THRU();
3756 case X86_SEL_TYPE_SYS_386_INT_GATE:
3757 fEflToClear |= X86_EFL_IF;
3758 break;
3759
3760 case X86_SEL_TYPE_SYS_TASK_GATE:
3761 fTaskGate = true;
3762#ifndef IEM_IMPLEMENTS_TASKSWITCH
3763 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3764#endif
3765 break;
3766
3767 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3768 f32BitGate = false;
3769 break;
3770 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3771 break;
3772
3773 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3774 }
3775
3776 /* Check DPL against CPL if applicable. */
3777 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3778 {
3779 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3780 {
3781 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3782 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3783 }
3784 }
3785
3786 /* Is it there? */
3787 if (!Idte.Gate.u1Present)
3788 {
3789 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3790 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3791 }
3792
3793 /* Is it a task-gate? */
3794 if (fTaskGate)
3795 {
3796 /*
3797 * Construct the error code masks based on what caused this task switch.
3798 * See Intel Instruction reference for INT.
3799 */
3800 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3801 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3802 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3803 RTSEL SelTss = Idte.Gate.u16Sel;
3804
3805 /*
3806 * Fetch the TSS descriptor in the GDT.
3807 */
3808 IEMSELDESC DescTSS;
3809 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3810 if (rcStrict != VINF_SUCCESS)
3811 {
3812 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3813 VBOXSTRICTRC_VAL(rcStrict)));
3814 return rcStrict;
3815 }
3816
3817 /* The TSS descriptor must be a system segment and be available (not busy). */
3818 if ( DescTSS.Legacy.Gen.u1DescType
3819 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3820 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3821 {
3822 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3823 u8Vector, SelTss, DescTSS.Legacy.au64));
3824 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3825 }
3826
3827 /* The TSS must be present. */
3828 if (!DescTSS.Legacy.Gen.u1Present)
3829 {
3830 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3831 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3832 }
3833
3834 /* Do the actual task switch. */
3835 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3836 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3837 fFlags, uErr, uCr2, SelTss, &DescTSS);
3838 }
3839
3840 /* A null CS is bad. */
3841 RTSEL NewCS = Idte.Gate.u16Sel;
3842 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3843 {
3844 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3845 return iemRaiseGeneralProtectionFault0(pVCpu);
3846 }
3847
3848 /* Fetch the descriptor for the new CS. */
3849 IEMSELDESC DescCS;
3850 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3851 if (rcStrict != VINF_SUCCESS)
3852 {
3853 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3854 return rcStrict;
3855 }
3856
3857 /* Must be a code segment. */
3858 if (!DescCS.Legacy.Gen.u1DescType)
3859 {
3860 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3861 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3862 }
3863 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3864 {
3865 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3866 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3867 }
3868
3869 /* Don't allow lowering the privilege level. */
3870 /** @todo Does the lowering of privileges apply to software interrupts
3871 * only? This has bearings on the more-privileged or
3872 * same-privilege stack behavior further down. A testcase would
3873 * be nice. */
3874 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3875 {
3876 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3877 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3878 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3879 }
3880
3881 /* Make sure the selector is present. */
3882 if (!DescCS.Legacy.Gen.u1Present)
3883 {
3884 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3885 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3886 }
3887
3888#ifdef LOG_ENABLED
3889 /* If software interrupt, try decode it if logging is enabled and such. */
3890 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3891 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3892 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3893#endif
3894
3895 /* Check the new EIP against the new CS limit. */
3896 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3897 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3898 ? Idte.Gate.u16OffsetLow
3899 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3900 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3901 if (uNewEip > cbLimitCS)
3902 {
3903 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3904 u8Vector, uNewEip, cbLimitCS, NewCS));
3905 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3906 }
3907 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3908
3909 /* Calc the flag image to push. */
3910 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3911 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3912 fEfl &= ~X86_EFL_RF;
3913 else
3914 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3915
3916 /* From V8086 mode only go to CPL 0. */
3917 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3918 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3919 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3920 {
3921 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3922 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3923 }
3924
3925 /*
3926 * If the privilege level changes, we need to get a new stack from the TSS.
3927 * This in turn means validating the new SS and ESP...
3928 */
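    /* The new SS:ESP is taken from the current TSS, i.e. the ring-0/1/2 stack
       fields selected by the target CPL (see iemRaiseLoadStackFromTss32Or16 below). */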
3929 if (uNewCpl != IEM_GET_CPL(pVCpu))
3930 {
3931 RTSEL NewSS;
3932 uint32_t uNewEsp;
3933 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3934 if (rcStrict != VINF_SUCCESS)
3935 return rcStrict;
3936
3937 IEMSELDESC DescSS;
3938 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3939 if (rcStrict != VINF_SUCCESS)
3940 return rcStrict;
3941 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3942 if (!DescSS.Legacy.Gen.u1DefBig)
3943 {
3944 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3945 uNewEsp = (uint16_t)uNewEsp;
3946 }
3947
3948 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3949
3950 /* Check that there is sufficient space for the stack frame. */
3951 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3952 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3953 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3954 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
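        /* Frame layout, lowest address first: [errcode,] EIP, CS, EFLAGS, ESP, SS,
           plus ES, DS, FS, GS when interrupting V8086 code; each entry is 2 or 4
           bytes depending on the gate size, giving the byte counts above. */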
3955
3956 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3957 {
3958 if ( uNewEsp - 1 > cbLimitSS
3959 || uNewEsp < cbStackFrame)
3960 {
3961 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3962 u8Vector, NewSS, uNewEsp, cbStackFrame));
3963 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3964 }
3965 }
3966 else
3967 {
3968 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3969 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3970 {
3971 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3972 u8Vector, NewSS, uNewEsp, cbStackFrame));
3973 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3974 }
3975 }
3976
3977 /*
3978 * Start making changes.
3979 */
3980
3981 /* Set the new CPL so that stack accesses use it. */
3982 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3983 IEM_SET_CPL(pVCpu, uNewCpl);
3984
3985 /* Create the stack frame. */
3986 uint8_t bUnmapInfoStackFrame;
3987 RTPTRUNION uStackFrame;
3988 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3989 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3990 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3991 if (rcStrict != VINF_SUCCESS)
3992 return rcStrict;
3993 if (f32BitGate)
3994 {
3995 if (fFlags & IEM_XCPT_FLAGS_ERR)
3996 *uStackFrame.pu32++ = uErr;
3997 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3998 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3999 uStackFrame.pu32[2] = fEfl;
4000 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4001 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4002 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4003 if (fEfl & X86_EFL_VM)
4004 {
4005 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4006 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4007 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4008 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4009 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4010 }
4011 }
4012 else
4013 {
4014 if (fFlags & IEM_XCPT_FLAGS_ERR)
4015 *uStackFrame.pu16++ = uErr;
4016 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4017 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4018 uStackFrame.pu16[2] = fEfl;
4019 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
4020 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
4021 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
4022 if (fEfl & X86_EFL_VM)
4023 {
4024 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
4025 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
4026 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
4027 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
4028 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
4029 }
4030 }
4031 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4032 if (rcStrict != VINF_SUCCESS)
4033 return rcStrict;
4034
4035 /* Mark the selectors 'accessed' (hope this is the correct time). */
4036 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4037 * after pushing the stack frame? (Write protect the gdt + stack to
4038 * find out.) */
4039 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4040 {
4041 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4042 if (rcStrict != VINF_SUCCESS)
4043 return rcStrict;
4044 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4045 }
4046
4047 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4048 {
4049 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4050 if (rcStrict != VINF_SUCCESS)
4051 return rcStrict;
4052 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4053 }
4054
4055 /*
4056 * Start committing the register changes (joins with the DPL=CPL branch).
4057 */
4058 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
4059 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
4060 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4061 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
4062 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4063 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4064 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4065 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4066 * SP is loaded).
4067 * Need to check the other combinations too:
4068 * - 16-bit TSS, 32-bit handler
4069 * - 32-bit TSS, 16-bit handler */
4070 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
4071 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
4072 else
4073 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
4074
4075 if (fEfl & X86_EFL_VM)
4076 {
4077 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
4078 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
4079 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
4080 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
4081 }
4082 }
4083 /*
4084 * Same privilege, no stack change and smaller stack frame.
4085 */
4086 else
4087 {
4088 uint64_t uNewRsp;
4089 uint8_t bUnmapInfoStackFrame;
4090 RTPTRUNION uStackFrame;
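        /* Same-privilege frame, lowest address first: [errcode,] EIP, CS, EFLAGS,
           again in 2 or 4 byte entries depending on the gate size. */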
4091 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4092 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
4093 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
4094 if (rcStrict != VINF_SUCCESS)
4095 return rcStrict;
4096
4097 if (f32BitGate)
4098 {
4099 if (fFlags & IEM_XCPT_FLAGS_ERR)
4100 *uStackFrame.pu32++ = uErr;
4101 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4102 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
4103 uStackFrame.pu32[2] = fEfl;
4104 }
4105 else
4106 {
4107 if (fFlags & IEM_XCPT_FLAGS_ERR)
4108 *uStackFrame.pu16++ = uErr;
4109 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4110 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
4111 uStackFrame.pu16[2] = fEfl;
4112 }
4113 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
4114 if (rcStrict != VINF_SUCCESS)
4115 return rcStrict;
4116
4117 /* Mark the CS selector as 'accessed'. */
4118 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4119 {
4120 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4121 if (rcStrict != VINF_SUCCESS)
4122 return rcStrict;
4123 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4124 }
4125
4126 /*
4127 * Start committing the register changes (joins with the other branch).
4128 */
4129 pVCpu->cpum.GstCtx.rsp = uNewRsp;
4130 }
4131
4132 /* ... register committing continues. */
4133 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4134 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4135 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4136 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
4137 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4138 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4139
4140 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4141 fEfl &= ~fEflToClear;
4142 IEMMISC_SET_EFL(pVCpu, fEfl);
4143
4144 if (fFlags & IEM_XCPT_FLAGS_CR2)
4145 pVCpu->cpum.GstCtx.cr2 = uCr2;
4146
4147 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4148 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4149
4150 /* Make sure the execution flags are correct. */
4151 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
4152 if (fExecNew != pVCpu->iem.s.fExec)
4153 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
4154 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
4155 pVCpu->iem.s.fExec = fExecNew;
4156 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
4157
4158 /*
4159 * Deal with debug events that follow the exception and clear inhibit flags.
4160 */
4161 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4162 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4163 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4164 else
4165 {
4166 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
4167 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4168 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4169 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4170 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4171 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4172 return iemRaiseDebugException(pVCpu);
4173 }
4174
4175 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4176}
4177
4178
4179/**
4180 * Implements exceptions and interrupts for long mode.
4181 *
4182 * @returns VBox strict status code.
4183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4184 * @param cbInstr The number of bytes to offset rIP by in the return
4185 * address.
4186 * @param u8Vector The interrupt / exception vector number.
4187 * @param fFlags The flags.
4188 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4189 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4190 */
4191static VBOXSTRICTRC
4192iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
4193 uint8_t cbInstr,
4194 uint8_t u8Vector,
4195 uint32_t fFlags,
4196 uint16_t uErr,
4197 uint64_t uCr2) RT_NOEXCEPT
4198{
4199 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4200
4201 /*
4202 * Read the IDT entry.
4203 */
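    /* In long mode each IDT entry is 16 bytes (the offset gains a 32-bit top part
       and an IST field), hence the << 4 and the two 8-byte fetches below. */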
4204 uint16_t offIdt = (uint16_t)u8Vector << 4;
4205 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
4206 {
4207 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4208 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4209 }
4210 X86DESC64 Idte;
4211#ifdef _MSC_VER /* Shut up silly compiler warning. */
4212 Idte.au64[0] = 0;
4213 Idte.au64[1] = 0;
4214#endif
4215 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
4216 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4217 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
4218 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4219 {
4220 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4221 return rcStrict;
4222 }
4223 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4224 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4225 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4226
4227 /*
4228 * Check the descriptor type, DPL and such.
4229 * ASSUMES this is done in the same order as described for call-gate calls.
4230 */
4231 if (Idte.Gate.u1DescType)
4232 {
4233 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4234 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4235 }
4236 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4237 switch (Idte.Gate.u4Type)
4238 {
4239 case AMD64_SEL_TYPE_SYS_INT_GATE:
4240 fEflToClear |= X86_EFL_IF;
4241 break;
4242 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4243 break;
4244
4245 default:
4246 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4247 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4248 }
4249
4250 /* Check DPL against CPL if applicable. */
4251 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4252 {
4253 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
4254 {
4255 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
4256 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4257 }
4258 }
4259
4260 /* Is it there? */
4261 if (!Idte.Gate.u1Present)
4262 {
4263 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4264 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4265 }
4266
4267 /* A null CS is bad. */
4268 RTSEL NewCS = Idte.Gate.u16Sel;
4269 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4270 {
4271 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4272 return iemRaiseGeneralProtectionFault0(pVCpu);
4273 }
4274
4275 /* Fetch the descriptor for the new CS. */
4276 IEMSELDESC DescCS;
4277 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4278 if (rcStrict != VINF_SUCCESS)
4279 {
4280 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4281 return rcStrict;
4282 }
4283
4284 /* Must be a 64-bit code segment. */
4285 if (!DescCS.Long.Gen.u1DescType)
4286 {
4287 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4288 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4289 }
4290 if ( !DescCS.Long.Gen.u1Long
4291 || DescCS.Long.Gen.u1DefBig
4292 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4293 {
4294 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4295 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4296 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4297 }
4298
4299 /* Don't allow lowering the privilege level. For non-conforming CS
4300 selectors, the CS.DPL sets the privilege level the trap/interrupt
4301 handler runs at. For conforming CS selectors, the CPL remains
4302 unchanged, but the CS.DPL must be <= CPL. */
4303 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4304 * when CPU in Ring-0. Result \#GP? */
4305 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
4306 {
4307 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4308 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
4309 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4310 }
4311
4312
4313 /* Make sure the selector is present. */
4314 if (!DescCS.Legacy.Gen.u1Present)
4315 {
4316 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4317 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4318 }
4319
4320 /* Check that the new RIP is canonical. */
4321 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4322 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4323 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4324 if (!IEM_IS_CANONICAL(uNewRip))
4325 {
4326 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4327 return iemRaiseGeneralProtectionFault0(pVCpu);
4328 }
4329
4330 /*
4331 * If the privilege level changes or if the IST isn't zero, we need to get
4332 * a new stack from the TSS.
4333 */
4334 uint64_t uNewRsp;
4335 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4336 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
4337 if ( uNewCpl != IEM_GET_CPL(pVCpu)
4338 || Idte.Gate.u3IST != 0)
4339 {
4340 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4341 if (rcStrict != VINF_SUCCESS)
4342 return rcStrict;
4343 }
4344 else
4345 uNewRsp = pVCpu->cpum.GstCtx.rsp;
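    /* In 64-bit mode the stack pointer is aligned down to a 16-byte boundary before
       the frame is pushed, whether or not a stack switch took place. */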
4346 uNewRsp &= ~(uint64_t)0xf;
4347
4348 /*
4349 * Calc the flag image to push.
4350 */
4351 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4352 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4353 fEfl &= ~X86_EFL_RF;
4354 else
4355 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4356
4357 /*
4358 * Start making changes.
4359 */
4360 /* Set the new CPL so that stack accesses use it. */
4361 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
4362 IEM_SET_CPL(pVCpu, uNewCpl);
4363/** @todo Setting CPL this early seems wrong as it would affect any errors we
4364 * raise accessing the stack and (?) GDT/LDT... */
4365
4366 /* Create the stack frame. */
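    /* 64-bit frame, lowest address first: [errcode,] RIP, CS, RFLAGS, RSP, SS --
       5 or 6 qwords pushed onto the (possibly switched, 16-byte aligned) stack. */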
4367 uint8_t bUnmapInfoStackFrame;
4368 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4369 RTPTRUNION uStackFrame;
4370 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
4371 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
4372 if (rcStrict != VINF_SUCCESS)
4373 return rcStrict;
4374
4375 if (fFlags & IEM_XCPT_FLAGS_ERR)
4376 *uStackFrame.pu64++ = uErr;
4377 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
4378 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4379 uStackFrame.pu64[2] = fEfl;
4380 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
4381 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
4382 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4383 if (rcStrict != VINF_SUCCESS)
4384 return rcStrict;
4385
4386 /* Mark the CS selector 'accessed' (hope this is the correct time). */
4387 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4388 * after pushing the stack frame? (Write protect the gdt + stack to
4389 * find out.) */
4390 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4391 {
4392 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4393 if (rcStrict != VINF_SUCCESS)
4394 return rcStrict;
4395 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4396 }
4397
4398 /*
4399 * Start committing the register changes.
4400 */
4401 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4402 * hidden registers when interrupting 32-bit or 16-bit code! */
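/* On a privilege change in long mode, SS is loaded with a NULL selector carrying the new CPL as RPL; the unusable attribute set below mirrors that. */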
4403 if (uNewCpl != uOldCpl)
4404 {
4405 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
4406 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
4407 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4408 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4409 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4410 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4411 }
4412 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
4413 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4414 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4415 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4416 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4417 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4418 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4419 pVCpu->cpum.GstCtx.rip = uNewRip;
4420
4421 fEfl &= ~fEflToClear;
4422 IEMMISC_SET_EFL(pVCpu, fEfl);
4423
4424 if (fFlags & IEM_XCPT_FLAGS_CR2)
4425 pVCpu->cpum.GstCtx.cr2 = uCr2;
4426
4427 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4428 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4429
4430 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
4431
4432 /*
4433 * Deal with debug events that follow the exception and clear inhibit flags.
4434 */
4435 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4436 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4437 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4438 else
4439 {
4440 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
4441 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4442 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4443 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4444 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4445 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4446 return iemRaiseDebugException(pVCpu);
4447 }
4448
4449 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4450}
4451
4452
4453/**
4454 * Implements exceptions and interrupts.
4455 *
4456 * All exceptions and interrupts go thru this function!
4457 *
4458 * @returns VBox strict status code.
4459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4460 * @param cbInstr The number of bytes to offset rIP by in the return
4461 * address.
4462 * @param u8Vector The interrupt / exception vector number.
4463 * @param fFlags The flags.
4464 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4465 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4466 */
4467VBOXSTRICTRC
4468iemRaiseXcptOrInt(PVMCPUCC pVCpu,
4469 uint8_t cbInstr,
4470 uint8_t u8Vector,
4471 uint32_t fFlags,
4472 uint16_t uErr,
4473 uint64_t uCr2) RT_NOEXCEPT
4474{
4475 /*
4476 * Get all the state that we might need here.
4477 */
4478 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4479 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4480
4481#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4482 /*
4483 * Flush prefetch buffer
4484 */
4485 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4486#endif
4487
4488 /*
4489 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4490 */
4491 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
4492 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
4493 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
4494 | IEM_XCPT_FLAGS_BP_INSTR
4495 | IEM_XCPT_FLAGS_ICEBP_INSTR
4496 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4497 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
4498 {
4499 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4500 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4501 u8Vector = X86_XCPT_GP;
4502 uErr = 0;
4503 }
4504
4505 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
4506#ifdef DBGFTRACE_ENABLED
4507 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4508 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4509 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
4510#endif
4511
4512 /*
4513 * Check if DBGF wants to intercept the exception.
4514 */
4515 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
4516 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
4517 { /* likely */ }
4518 else
4519 {
4520 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
4521 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
4522 if (rcStrict != VINF_SUCCESS)
4523 return rcStrict;
4524 }
4525
4526 /*
4527 * Evaluate whether NMI blocking should be in effect.
4528 * Normally, NMI blocking is in effect whenever we inject an NMI.
4529 */
4530 bool fBlockNmi = u8Vector == X86_XCPT_NMI
4531 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
4532
4533#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4534 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4535 {
4536 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
4537 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4538 return rcStrict0;
4539
4540 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
4541 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
4542 {
4543 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
4544 fBlockNmi = false;
4545 }
4546 }
4547#endif
4548
4549#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4550 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
4551 {
4552 /*
4553 * If the event is being injected as part of VMRUN, it isn't subject to event
4554 * intercepts in the nested-guest. However, secondary exceptions that occur
4555 * during injection of any event -are- subject to exception intercepts.
4556 *
4557 * See AMD spec. 15.20 "Event Injection".
4558 */
4559 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
4560 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
4561 else
4562 {
4563 /*
4564 * Check and handle if the event being raised is intercepted.
4565 */
4566 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4567 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
4568 return rcStrict0;
4569 }
4570 }
4571#endif
4572
4573 /*
4574 * Set NMI blocking if necessary.
4575 */
4576 if (fBlockNmi)
4577 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
4578
4579 /*
4580 * Do recursion accounting.
4581 */
4582 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4583 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4584 if (pVCpu->iem.s.cXcptRecursions == 0)
4585 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4586 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
4587 else
4588 {
4589 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4590 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
4591 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4592
4593 if (pVCpu->iem.s.cXcptRecursions >= 4)
4594 {
4595#ifdef DEBUG_bird
4596 AssertFailed();
4597#endif
4598 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4599 }
4600
4601 /*
4602 * Evaluate the sequence of recurring events.
4603 */
4604 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4605 NULL /* pXcptRaiseInfo */);
4606 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4607 { /* likely */ }
4608 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4609 {
4610 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4611 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4612 u8Vector = X86_XCPT_DF;
4613 uErr = 0;
4614#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4615 /* VMX nested-guest #DF intercept needs to be checked here. */
4616 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4617 {
4618 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4619 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4620 return rcStrict0;
4621 }
4622#endif
4623 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4624 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4625 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4626 }
4627 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4628 {
4629 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4630 return iemInitiateCpuShutdown(pVCpu);
4631 }
4632 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4633 {
4634 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4635 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4636 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4637 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4638 return VERR_EM_GUEST_CPU_HANG;
4639 }
4640 else
4641 {
4642 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4643 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4644 return VERR_IEM_IPE_9;
4645 }
4646
4647 /*
4648 * The 'EXT' bit is set when an exception occurs during delivery of an external
4649 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4650 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4651 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
4652 *
4653 * [1] - Intel spec. 6.13 "Error Code"
4654 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4655 * [3] - Intel Instruction reference for INT n.
4656 */
4657 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4658 && (fFlags & IEM_XCPT_FLAGS_ERR)
4659 && u8Vector != X86_XCPT_PF
4660 && u8Vector != X86_XCPT_DF)
4661 {
4662 uErr |= X86_TRAP_ERR_EXTERNAL;
4663 }
4664 }
4665
4666 pVCpu->iem.s.cXcptRecursions++;
4667 pVCpu->iem.s.uCurXcpt = u8Vector;
4668 pVCpu->iem.s.fCurXcpt = fFlags;
4669 pVCpu->iem.s.uCurXcptErr = uErr;
4670 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4671
4672 /*
4673 * Extensive logging.
4674 */
4675#if defined(LOG_ENABLED) && defined(IN_RING3)
4676 if (LogIs3Enabled())
4677 {
4678 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4679 char szRegs[4096];
4680 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4681 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4682 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4683 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4684 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4685 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4686 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4687 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4688 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4689 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4690 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4691 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4692 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4693 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4694 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4695 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4696 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4697 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4698 " efer=%016VR{efer}\n"
4699 " pat=%016VR{pat}\n"
4700 " sf_mask=%016VR{sf_mask}\n"
4701 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4702 " lstar=%016VR{lstar}\n"
4703 " star=%016VR{star} cstar=%016VR{cstar}\n"
4704 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4705 );
4706
4707 char szInstr[256];
4708 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4709 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4710 szInstr, sizeof(szInstr), NULL);
4711 Log3(("%s%s\n", szRegs, szInstr));
4712 }
4713#endif /* LOG_ENABLED */
4714
4715 /*
4716 * Stats.
4717 */
4718 uint64_t const uTimestamp = ASMReadTSC();
4719 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4720 {
4721 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4722 EMHistoryAddExit(pVCpu,
4723 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4724 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4725 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4726 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4727 IEMTLBTRACE_IRQ(pVCpu, u8Vector, fFlags, pVCpu->cpum.GstCtx.rflags.uBoth);
4728 }
4729 else
4730 {
4731 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4732 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4733 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4734 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4735 if (fFlags & IEM_XCPT_FLAGS_ERR)
4736 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4737 if (fFlags & IEM_XCPT_FLAGS_CR2)
4738 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4739 IEMTLBTRACE_XCPT(pVCpu, u8Vector, fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0, fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0, fFlags);
4740 }
4741
4742 /*
4743 * Hack alert! Convert incoming debug events to silent ones on Intel.
4744 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4745 */
4746 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4747 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4748 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4749 { /* ignore */ }
4750 else
4751 {
4752 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
4753 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
4754 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4755 | CPUMCTX_DBG_HIT_DRX_SILENT;
4756 }
4757
4758 /*
4759 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4760 * to ensure that a stale TLB or paging cache entry will only cause one
4761 * spurious #PF.
4762 */
4763 if ( u8Vector == X86_XCPT_PF
4764 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4765 IEMTlbInvalidatePage(pVCpu, uCr2);
4766
4767 /*
4768 * Call the mode specific worker function.
4769 */
4770 VBOXSTRICTRC rcStrict;
4771 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4772 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4773 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4774 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4775 else
4776 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4777
4778 /* Flush the prefetch buffer. */
4779 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4780
4781 /*
4782 * Unwind.
4783 */
4784 pVCpu->iem.s.cXcptRecursions--;
4785 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4786 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4787 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4788 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4789 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4790 return rcStrict;
4791}
4792
4793#ifdef IEM_WITH_SETJMP
4794/**
4795 * See iemRaiseXcptOrInt. Will not return.
4796 */
4797DECL_NO_RETURN(void)
4798iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4799 uint8_t cbInstr,
4800 uint8_t u8Vector,
4801 uint32_t fFlags,
4802 uint16_t uErr,
4803 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4804{
4805 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4806 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4807}
4808#endif
4809
4810
4811/** \#DE - 00. */
4812VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4813{
4814 if (GCMIsInterceptingXcptDE(pVCpu))
4815 {
4816 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4817 if (rc == VINF_SUCCESS)
4818 {
4819 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4820 return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
4821 }
4822 }
4823 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4824}
4825
4826
4827#ifdef IEM_WITH_SETJMP
4828/** \#DE - 00. */
4829DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4830{
4831 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4832}
4833#endif
4834
4835
4836/** \#DB - 01.
4837 * @note This automatically clears DR7.GD. */
4838VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4839{
4840 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4841 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4842 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4843}
4844
4845
4846/** \#BR - 05. */
4847VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4848{
4849 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4850}
4851
4852
4853/** \#UD - 06. */
4854VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4855{
4856 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4857}
4858
4859
4860#ifdef IEM_WITH_SETJMP
4861/** \#UD - 06. */
4862DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4863{
4864 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4865}
4866#endif
4867
4868
4869/** \#NM - 07. */
4870VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4871{
4872 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4873}
4874
4875
4876#ifdef IEM_WITH_SETJMP
4877/** \#NM - 07. */
4878DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4879{
4880 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4881}
4882#endif
4883
4884
4885/** \#TS(err) - 0a. */
4886VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4887{
4888 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4889}
4890
4891
4892/** \#TS(tr) - 0a. */
4893VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4894{
4895 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4896 pVCpu->cpum.GstCtx.tr.Sel, 0);
4897}
4898
4899
4900/** \#TS(0) - 0a. */
4901VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4902{
4903 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4904 0, 0);
4905}
4906
4907
4908/** \#TS(err) - 0a. */
4909VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4910{
4911 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4912 uSel & X86_SEL_MASK_OFF_RPL, 0);
4913}
4914
4915
4916/** \#NP(err) - 0b. */
4917VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4918{
4919 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4920}
4921
4922
4923/** \#NP(sel) - 0b. */
4924VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4925{
4926 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4927 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4928 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4929 uSel & ~X86_SEL_RPL, 0);
4930}
4931
4932
4933/** \#SS(seg) - 0c. */
4934VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4935{
4936 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4937 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4938 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4939 uSel & ~X86_SEL_RPL, 0);
4940}
4941
4942
4943/** \#SS(err) - 0c. */
4944VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4945{
4946 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4947 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4948 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4949}
4950
4951
4952/** \#GP(n) - 0d. */
4953VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4954{
4955 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4956 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4957}
4958
4959
4960/** \#GP(0) - 0d. */
4961VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4962{
4963 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4964 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4965}
4966
4967#ifdef IEM_WITH_SETJMP
4968/** \#GP(0) - 0d. */
4969DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4970{
4971 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4972 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4973}
4974#endif
4975
4976
4977/** \#GP(sel) - 0d. */
4978VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4979{
4980 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4981 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4982 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4983 Sel & ~X86_SEL_RPL, 0);
4984}
4985
4986
4987/** \#GP(0) - 0d. */
4988VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4989{
4990 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4991 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4992}
4993
4994
4995/** \#GP(sel) - 0d. */
4996VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4997{
4998 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4999 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
5000 NOREF(iSegReg); NOREF(fAccess);
5001 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5002 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5003}
5004
5005#ifdef IEM_WITH_SETJMP
5006/** \#GP(sel) - 0d, longjmp. */
5007DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
5008{
5009 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
5010 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
5011 NOREF(iSegReg); NOREF(fAccess);
5012 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5013 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5014}
5015#endif
5016
5017/** \#GP(sel) - 0d. */
5018VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
5019{
5020 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
5021 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
5022 NOREF(Sel);
5023 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5024}
5025
5026#ifdef IEM_WITH_SETJMP
5027/** \#GP(sel) - 0d, longjmp. */
5028DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
5029{
5030 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
5031 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
5032 NOREF(Sel);
5033 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5034}
5035#endif
5036
5037
5038/** \#GP(sel) - 0d. */
5039VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
5040{
5041 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
5042 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
5043 NOREF(iSegReg); NOREF(fAccess);
5044 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5045}
5046
5047#ifdef IEM_WITH_SETJMP
5048/** \#GP(sel) - 0d, longjmp. */
5049DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
5050{
5051 NOREF(iSegReg); NOREF(fAccess);
5052 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5053}
5054#endif
5055
5056
5057/** \#PF(n) - 0e. */
5058VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
5059{
5060 uint16_t uErr;
5061 switch (rc)
5062 {
5063 case VERR_PAGE_NOT_PRESENT:
5064 case VERR_PAGE_TABLE_NOT_PRESENT:
5065 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5066 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5067 uErr = 0;
5068 break;
5069
5070 case VERR_RESERVED_PAGE_TABLE_BITS:
5071 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
5072 break;
5073
5074 default:
5075 AssertMsgFailed(("%Rrc\n", rc));
5076 RT_FALL_THRU();
5077 case VERR_ACCESS_DENIED:
5078 uErr = X86_TRAP_PF_P;
5079 break;
5080 }
5081
5082 if (IEM_GET_CPL(pVCpu) == 3)
5083 uErr |= X86_TRAP_PF_US;
5084
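/* The instruction-fetch (I/D) error code bit is only set here when NX paging is in effect (PAE paging with EFER.NXE set). */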
5085 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5086 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5087 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5088 uErr |= X86_TRAP_PF_ID;
5089
5090#if 0 /* This is so much nonsense, really. Why was it done like that? */
5091 /* Note! RW access callers reporting a WRITE protection fault will clear
5092 the READ flag before calling. So, read-modify-write accesses (RW)
5093 can safely be reported as READ faults. */
5094 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5095 uErr |= X86_TRAP_PF_RW;
5096#else
5097 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5098 {
5099 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
5100 /// (regardless of outcome of the comparison in the latter case).
5101 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
5102 uErr |= X86_TRAP_PF_RW;
5103 }
5104#endif
5105
5106 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
5107 of the memory operand rather than at the start of it. (Not sure what
5108 happens if it crosses a page boundary.) The current heuristic for
5109 this is to report the #PF for the last byte if the access is more than
5110 64 bytes. This is probably not correct, but we can work that out later;
5111 the main objective now is to get FXSAVE to work like real hardware and
5112 make bs3-cpu-basic2 work. */
5113 if (cbAccess <= 64)
5114 { /* likely */ }
5115 else
5116 GCPtrWhere += cbAccess - 1;
5117
5118 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5119 uErr, GCPtrWhere);
5120}
5121
5122#ifdef IEM_WITH_SETJMP
5123/** \#PF(n) - 0e, longjmp. */
5124DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
5125 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
5126{
5127 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
5128}
5129#endif
5130
5131
5132/** \#MF(0) - 10. */
5133VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
5134{
5135 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
5136 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5137
5138 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
5139 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
5140 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
5141}
5142
5143#ifdef IEM_WITH_SETJMP
5144/** \#MF(0) - 10, longjmp. */
5145DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
5146{
5147 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
5148}
5149#endif
5150
5151
5152/** \#AC(0) - 11. */
5153VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
5154{
5155 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5156}
5157
5158#ifdef IEM_WITH_SETJMP
5159/** \#AC(0) - 11, longjmp. */
5160DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
5161{
5162 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
5163}
5164#endif
5165
5166
5167/** \#XF(0)/\#XM(0) - 19. */
5168VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
5169{
5170 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5171}
5172
5173
5174#ifdef IEM_WITH_SETJMP
5175 /** \#XF(0)/\#XM(0) - 19, longjmp. */
5176DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
5177{
5178 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
5179}
5180#endif
5181
5182
5183/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
5184IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5185{
5186 NOREF(cbInstr);
5187 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5188}
5189
5190
5191/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
5192IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5193{
5194 NOREF(cbInstr);
5195 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5196}
5197
5198
5199/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
5200IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5201{
5202 NOREF(cbInstr);
5203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5204}
5205
5206
5207/** @} */
5208
5209/** @name Common opcode decoders.
5210 * @{
5211 */
5212//#include <iprt/mem.h>
5213
5214/**
5215 * Used to add extra details about a stub case.
5216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5217 */
5218void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
5219{
5220#if defined(LOG_ENABLED) && defined(IN_RING3)
5221 PVM pVM = pVCpu->CTX_SUFF(pVM);
5222 char szRegs[4096];
5223 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5224 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5225 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5226 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5227 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5228 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5229 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5230 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5231 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5232 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5233 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5234 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5235 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5236 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5237 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5238 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5239 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5240 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5241 " efer=%016VR{efer}\n"
5242 " pat=%016VR{pat}\n"
5243 " sf_mask=%016VR{sf_mask}\n"
5244 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5245 " lstar=%016VR{lstar}\n"
5246 " star=%016VR{star} cstar=%016VR{cstar}\n"
5247 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5248 );
5249
5250 char szInstr[256];
5251 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5252 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5253 szInstr, sizeof(szInstr), NULL);
5254
5255 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5256#else
5257 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
5258#endif
5259}
5260
5261/** @} */
5262
5263
5264
5265/** @name Register Access.
5266 * @{
5267 */
5268
5269/**
5270 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5271 *
5272 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5273 * segment limit.
5274 *
5275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5276 * @param cbInstr Instruction size.
5277 * @param offNextInstr The offset of the next instruction.
5278 * @param enmEffOpSize Effective operand size.
5279 */
5280VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
5281 IEMMODE enmEffOpSize) RT_NOEXCEPT
5282{
5283 switch (enmEffOpSize)
5284 {
5285 case IEMMODE_16BIT:
5286 {
5287 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
5288 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5289 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
5290 pVCpu->cpum.GstCtx.rip = uNewIp;
5291 else
5292 return iemRaiseGeneralProtectionFault0(pVCpu);
5293 break;
5294 }
5295
5296 case IEMMODE_32BIT:
5297 {
5298 Assert(!IEM_IS_64BIT_CODE(pVCpu));
5299 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
5300
5301 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
5302 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5303 pVCpu->cpum.GstCtx.rip = uNewEip;
5304 else
5305 return iemRaiseGeneralProtectionFault0(pVCpu);
5306 break;
5307 }
5308
5309 case IEMMODE_64BIT:
5310 {
5311 Assert(IEM_IS_64BIT_CODE(pVCpu));
5312
5313 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5314 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5315 pVCpu->cpum.GstCtx.rip = uNewRip;
5316 else
5317 return iemRaiseGeneralProtectionFault0(pVCpu);
5318 break;
5319 }
5320
5321 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5322 }
5323
5324#ifndef IEM_WITH_CODE_TLB
5325 /* Flush the prefetch buffer. */
5326 pVCpu->iem.s.cbOpcode = cbInstr;
5327#endif
5328
5329 /*
5330 * Clear RF and finish the instruction (maybe raise #DB).
5331 */
5332 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5333}
5334
5335
5336/**
5337 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5338 *
5339 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5340 * segment limit.
5341 *
5342 * @returns Strict VBox status code.
5343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5344 * @param cbInstr Instruction size.
5345 * @param offNextInstr The offset of the next instruction.
5346 */
5347VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
5348{
5349 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5350
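/* With a 16-bit operand size the result is truncated to IP, also in 64-bit mode (where the CS limit check is skipped). */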
5351 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
5352 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5353 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
5354 pVCpu->cpum.GstCtx.rip = uNewIp;
5355 else
5356 return iemRaiseGeneralProtectionFault0(pVCpu);
5357
5358#ifndef IEM_WITH_CODE_TLB
5359 /* Flush the prefetch buffer. */
5360 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5361#endif
5362
5363 /*
5364 * Clear RF and finish the instruction (maybe raise #DB).
5365 */
5366 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5367}
5368
5369
5370/**
5371 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5372 *
5373 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5374 * segment limit.
5375 *
5376 * @returns Strict VBox status code.
5377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5378 * @param cbInstr Instruction size.
5379 * @param offNextInstr The offset of the next instruction.
5380 * @param enmEffOpSize Effective operand size.
5381 */
5382VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5383 IEMMODE enmEffOpSize) RT_NOEXCEPT
5384{
5385 if (enmEffOpSize == IEMMODE_32BIT)
5386 {
5387 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
5388
5389 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
5390 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5391 pVCpu->cpum.GstCtx.rip = uNewEip;
5392 else
5393 return iemRaiseGeneralProtectionFault0(pVCpu);
5394 }
5395 else
5396 {
5397 Assert(enmEffOpSize == IEMMODE_64BIT);
5398
5399 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5400 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5401 pVCpu->cpum.GstCtx.rip = uNewRip;
5402 else
5403 return iemRaiseGeneralProtectionFault0(pVCpu);
5404 }
5405
5406#ifndef IEM_WITH_CODE_TLB
5407 /* Flush the prefetch buffer. */
5408 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5409#endif
5410
5411 /*
5412 * Clear RF and finish the instruction (maybe raise #DB).
5413 */
5414 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5415}
5416
5417/** @} */
5418
5419
5420/** @name FPU access and helpers.
5421 *
5422 * @{
5423 */
5424
5425/**
5426 * Updates the x87.DS and FPUDP registers.
5427 *
5428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5429 * @param pFpuCtx The FPU context.
5430 * @param iEffSeg The effective segment register.
5431 * @param GCPtrEff The effective address relative to @a iEffSeg.
5432 */
5433DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5434{
5435 RTSEL sel;
5436 switch (iEffSeg)
5437 {
5438 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
5439 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
5440 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
5441 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
5442 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
5443 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
5444 default:
5445 AssertMsgFailed(("%d\n", iEffSeg));
5446 sel = pVCpu->cpum.GstCtx.ds.Sel;
5447 }
5448 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5449 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5450 {
5451 pFpuCtx->DS = 0;
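/* In real and V86 mode FPUDP holds the linear address: segment base (selector << 4) plus offset. */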
5452 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
5453 }
5454 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
5455 {
5456 pFpuCtx->DS = sel;
5457 pFpuCtx->FPUDP = GCPtrEff;
5458 }
5459 else
5460 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
5461}
5462
5463
5464/**
5465 * Rotates the stack registers in the push direction.
5466 *
5467 * @param pFpuCtx The FPU context.
5468 * @remarks This is a complete waste of time, but fxsave stores the registers in
5469 * stack order.
5470 */
5471DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5472{
5473 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5474 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5475 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5476 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5477 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5478 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5479 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5480 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5481 pFpuCtx->aRegs[0].r80 = r80Tmp;
5482}
5483
5484
5485/**
5486 * Rotates the stack registers in the pop direction.
5487 *
5488 * @param pFpuCtx The FPU context.
5489 * @remarks This is a complete waste of time, but fxsave stores the registers in
5490 * stack order.
5491 */
5492DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5493{
5494 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5495 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5496 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5497 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5498 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5499 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5500 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5501 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5502 pFpuCtx->aRegs[7].r80 = r80Tmp;
5503}
5504
5505
5506/**
5507 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5508 * exception prevents it.
5509 *
5510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5511 * @param pResult The FPU operation result to push.
5512 * @param pFpuCtx The FPU context.
5513 */
5514static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5515{
5516 /* Update FSW and bail if there are pending exceptions afterwards. */
5517 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5518 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5519 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5520 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5521 {
5522 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5523 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
5524 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5525 pFpuCtx->FSW = fFsw;
5526 return;
5527 }
5528
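/* Pushing decrements TOP; adding 7 modulo 8 is the same as subtracting one in the 3-bit TOP field. */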
5529 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5530 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5531 {
5532 /* All is fine, push the actual value. */
5533 pFpuCtx->FTW |= RT_BIT(iNewTop);
5534 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5535 }
5536 else if (pFpuCtx->FCW & X86_FCW_IM)
5537 {
5538 /* Masked stack overflow, push QNaN. */
5539 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5540 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5541 }
5542 else
5543 {
5544 /* Raise stack overflow, don't push anything. */
5545 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5546 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5547 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5548 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5549 return;
5550 }
5551
5552 fFsw &= ~X86_FSW_TOP_MASK;
5553 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5554 pFpuCtx->FSW = fFsw;
5555
5556 iemFpuRotateStackPush(pFpuCtx);
5557 RT_NOREF(pVCpu);
5558}
5559
5560
5561/**
5562 * Stores a result in a FPU register and updates the FSW and FTW.
5563 *
5564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5565 * @param pFpuCtx The FPU context.
5566 * @param pResult The result to store.
5567 * @param iStReg Which FPU register to store it in.
5568 */
5569static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5570{
5571 Assert(iStReg < 8);
5572 uint16_t fNewFsw = pFpuCtx->FSW;
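/* Convert the ST(iStReg) relative index into an absolute x87 register index. */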
5573 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
5574 fNewFsw &= ~X86_FSW_C_MASK;
5575 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5576 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5577 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5578 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5579 pFpuCtx->FSW = fNewFsw;
5580 pFpuCtx->FTW |= RT_BIT(iReg);
5581 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5582 RT_NOREF(pVCpu);
5583}
5584
5585
5586/**
5587 * Only updates the FPU status word (FSW) with the result of the current
5588 * instruction.
5589 *
5590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5591 * @param pFpuCtx The FPU context.
5592 * @param u16FSW The FSW output of the current instruction.
5593 */
5594static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
5595{
5596 uint16_t fNewFsw = pFpuCtx->FSW;
5597 fNewFsw &= ~X86_FSW_C_MASK;
5598 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
5599 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5600 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5601 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5602 pFpuCtx->FSW = fNewFsw;
5603 RT_NOREF(pVCpu);
5604}
5605
5606
5607/**
5608 * Pops one item off the FPU stack if no pending exception prevents it.
5609 *
5610 * @param pFpuCtx The FPU context.
5611 */
5612static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5613{
5614 /* Check pending exceptions. */
5615 uint16_t uFSW = pFpuCtx->FSW;
5616 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5617 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5618 return;
5619
5620 /* TOP++ (pop): adding 9 modulo 8 increments the 3-bit TOP field by one. */
5621 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5622 uFSW &= ~X86_FSW_TOP_MASK;
5623 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5624 pFpuCtx->FSW = uFSW;
5625
5626 /* Mark the previous ST0 as empty. */
5627 iOldTop >>= X86_FSW_TOP_SHIFT;
5628 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5629
5630 /* Rotate the registers. */
5631 iemFpuRotateStackPop(pFpuCtx);
5632}
5633
5634
5635/**
5636 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5637 *
5638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5639 * @param pResult The FPU operation result to push.
5640 * @param uFpuOpcode The FPU opcode value.
5641 */
5642void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5643{
5644 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5645 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5646 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5647}
5648
5649
5650/**
5651 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5652 * and sets FPUDP and FPUDS.
5653 *
5654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5655 * @param pResult The FPU operation result to push.
5656 * @param iEffSeg The effective segment register.
5657 * @param GCPtrEff The effective address relative to @a iEffSeg.
5658 * @param uFpuOpcode The FPU opcode value.
5659 */
5660void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5661 uint16_t uFpuOpcode) RT_NOEXCEPT
5662{
5663 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5664 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5665 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5666 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5667}
5668
5669
5670/**
5671 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5672 * unless a pending exception prevents it.
5673 *
5674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5675 * @param pResult The FPU operation result to store and push.
5676 * @param uFpuOpcode The FPU opcode value.
5677 */
5678void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5679{
5680 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5681 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5682
5683 /* Update FSW and bail if there are pending exceptions afterwards. */
5684 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5685 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5686 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5687 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5688 {
5689 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5690 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5691 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5692 pFpuCtx->FSW = fFsw;
5693 return;
5694 }
5695
5696 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5697 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5698 {
5699 /* All is fine, push the actual value. */
5700 pFpuCtx->FTW |= RT_BIT(iNewTop);
5701 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5702 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5703 }
5704 else if (pFpuCtx->FCW & X86_FCW_IM)
5705 {
5706 /* Masked stack overflow, push QNaN. */
5707 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5708 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5709 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5710 }
5711 else
5712 {
5713 /* Raise stack overflow, don't push anything. */
5714 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5715 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5716 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5717 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5718 return;
5719 }
5720
5721 fFsw &= ~X86_FSW_TOP_MASK;
5722 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5723 pFpuCtx->FSW = fFsw;
5724
5725 iemFpuRotateStackPush(pFpuCtx);
5726}
5727
5728
5729/**
5730 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5731 * FOP.
5732 *
5733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5734 * @param pResult The result to store.
5735 * @param iStReg Which FPU register to store it in.
5736 * @param uFpuOpcode The FPU opcode value.
5737 */
5738void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5739{
5740 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5741 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5742 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5743}
5744
5745
5746/**
5747 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5748 * FOP, and then pops the stack.
5749 *
5750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5751 * @param pResult The result to store.
5752 * @param iStReg Which FPU register to store it in.
5753 * @param uFpuOpcode The FPU opcode value.
5754 */
5755void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5756{
5757 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5758 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5759 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5760 iemFpuMaybePopOne(pFpuCtx);
5761}
5762
5763
5764/**
5765 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5766 * FPUDP, and FPUDS.
5767 *
5768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5769 * @param pResult The result to store.
5770 * @param iStReg Which FPU register to store it in.
5771 * @param iEffSeg The effective memory operand selector register.
5772 * @param GCPtrEff The effective memory operand offset.
5773 * @param uFpuOpcode The FPU opcode value.
5774 */
5775void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5776 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5777{
5778 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5779 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5780 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5781 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5782}
5783
5784
5785/**
5786 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5787 * FPUDP, and FPUDS, and then pops the stack.
5788 *
5789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5790 * @param pResult The result to store.
5791 * @param iStReg Which FPU register to store it in.
5792 * @param iEffSeg The effective memory operand selector register.
5793 * @param GCPtrEff The effective memory operand offset.
5794 * @param uFpuOpcode The FPU opcode value.
5795 */
5796void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5797 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5798{
5799 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5800 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5801 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5802 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5803 iemFpuMaybePopOne(pFpuCtx);
5804}
5805
5806
5807/**
5808 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5809 *
5810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5811 * @param uFpuOpcode The FPU opcode value.
5812 */
5813void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5814{
5815 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5816 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5817}
5818
5819
5820/**
5821 * Updates the FSW, FOP, FPUIP, and FPUCS.
5822 *
5823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5824 * @param u16FSW The FSW from the current instruction.
5825 * @param uFpuOpcode The FPU opcode value.
5826 */
5827void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5828{
5829 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5830 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5831 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5832}
5833
5834
5835/**
5836 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5837 *
5838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5839 * @param u16FSW The FSW from the current instruction.
5840 * @param uFpuOpcode The FPU opcode value.
5841 */
5842void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5843{
5844 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5845 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5846 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5847 iemFpuMaybePopOne(pFpuCtx);
5848}
5849
5850
5851/**
5852 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5853 *
5854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5855 * @param u16FSW The FSW from the current instruction.
5856 * @param iEffSeg The effective memory operand selector register.
5857 * @param GCPtrEff The effective memory operand offset.
5858 * @param uFpuOpcode The FPU opcode value.
5859 */
5860void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5861{
5862 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5863 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5864 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5865 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5866}
5867
5868
5869/**
5870 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5871 *
5872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5873 * @param u16FSW The FSW from the current instruction.
5874 * @param uFpuOpcode The FPU opcode value.
5875 */
5876void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5877{
5878 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5879 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5880 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5881 iemFpuMaybePopOne(pFpuCtx);
5882 iemFpuMaybePopOne(pFpuCtx);
5883}
5884
5885
5886/**
5887 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5888 *
5889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5890 * @param u16FSW The FSW from the current instruction.
5891 * @param iEffSeg The effective memory operand selector register.
5892 * @param GCPtrEff The effective memory operand offset.
5893 * @param uFpuOpcode The FPU opcode value.
5894 */
5895void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5896 uint16_t uFpuOpcode) RT_NOEXCEPT
5897{
5898 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5899 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5900 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5901 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5902 iemFpuMaybePopOne(pFpuCtx);
5903}
5904
5905
5906/**
5907 * Worker routine for raising an FPU stack underflow exception.
5908 *
5909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5910 * @param pFpuCtx The FPU context.
5911 * @param iStReg The stack register being accessed.
5912 */
5913static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5914{
5915 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5916 if (pFpuCtx->FCW & X86_FCW_IM)
5917 {
5918 /* Masked underflow. */
5919 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5920 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5921 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5922 if (iStReg != UINT8_MAX)
5923 {
5924 pFpuCtx->FTW |= RT_BIT(iReg);
5925 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5926 }
5927 }
5928 else
5929 {
5930 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5931 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5932 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5933 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5934 }
5935 RT_NOREF(pVCpu);
5936}
5937
5938
5939/**
5940 * Raises an FPU stack underflow exception.
5941 *
5942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5943 * @param iStReg The destination register that should be loaded
5944 * with QNaN if \#IS is masked. Specify
5945 * UINT8_MAX if none (like for fcom).
5946 * @param uFpuOpcode The FPU opcode value.
5947 */
5948void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5949{
5950 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5951 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5952 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5953}
5954
5955
5956void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5957{
5958 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5959 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5960 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5961 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5962}
5963
5964
5965void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5966{
5967 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5968 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5969 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5970 iemFpuMaybePopOne(pFpuCtx);
5971}
5972
5973
5974void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5975 uint16_t uFpuOpcode) RT_NOEXCEPT
5976{
5977 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5978 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5979 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5980 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5981 iemFpuMaybePopOne(pFpuCtx);
5982}
5983
5984
5985void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5986{
5987 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5988 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5989 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5990 iemFpuMaybePopOne(pFpuCtx);
5991 iemFpuMaybePopOne(pFpuCtx);
5992}
5993
5994
5995void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5996{
5997 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5998 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5999
6000 if (pFpuCtx->FCW & X86_FCW_IM)
6001 {
6002 /* Masked underflow - Push QNaN. */
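      /* Note: (TOP + 7) & 7 == (TOP - 1) & 7, i.e. this decrements TOP for the push.
         The aRegs[] array is kept ST-relative, so the QNaN written to aRegs[7] below
         ends up as the new ST(0) once iemFpuRotateStackPush has rotated the
         register array. */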
6003 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6004 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6005 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6006 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6007 pFpuCtx->FTW |= RT_BIT(iNewTop);
6008 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6009 iemFpuRotateStackPush(pFpuCtx);
6010 }
6011 else
6012 {
6013 /* Exception pending - don't change TOP or the register stack. */
6014 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6015 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6016 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
6017 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
6018 }
6019}
6020
6021
6022void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
6023{
6024 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
6025 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
6026
6027 if (pFpuCtx->FCW & X86_FCW_IM)
6028 {
6029 /* Masked underflow - Push QNaN. */
6030 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6031 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6032 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6033 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6034 pFpuCtx->FTW |= RT_BIT(iNewTop);
6035 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6036 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6037 iemFpuRotateStackPush(pFpuCtx);
6038 }
6039 else
6040 {
6041 /* Exception pending - don't change TOP or the register stack. */
6042 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6043 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6044 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
6045 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
6046 }
6047}
6048
6049
6050/**
6051 * Worker routine for raising an FPU stack overflow exception on a push.
6052 *
6053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6054 * @param pFpuCtx The FPU context.
6055 */
6056static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
6057{
6058 if (pFpuCtx->FCW & X86_FCW_IM)
6059 {
6060 /* Masked overflow. */
6061 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6062 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6063 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
6064 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6065 pFpuCtx->FTW |= RT_BIT(iNewTop);
6066 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6067 iemFpuRotateStackPush(pFpuCtx);
6068 }
6069 else
6070 {
6071 /* Exception pending - don't change TOP or the register stack. */
6072 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6073 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6074 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
6075 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
6076 }
6077 RT_NOREF(pVCpu);
6078}
6079
6080
6081/**
6082 * Raises an FPU stack overflow exception on a push.
6083 *
6084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6085 * @param uFpuOpcode The FPU opcode value.
6086 */
6087void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
6088{
6089 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
6090 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
6091 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
6092}
6093
6094
6095/**
6096 * Raises a FPU stack overflow exception on a push with a memory operand.
6097 *
6098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6099 * @param iEffSeg The effective memory operand selector register.
6100 * @param GCPtrEff The effective memory operand offset.
6101 * @param uFpuOpcode The FPU opcode value.
6102 */
6103void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
6104{
6105 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
6106 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
6107 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
6108 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
6109}
6110
6111/** @} */
6112
6113
6114/** @name Memory access.
6115 *
6116 * @{
6117 */
6118
6119#undef LOG_GROUP
6120#define LOG_GROUP LOG_GROUP_IEM_MEM
6121
6122/**
6123 * Updates the IEMCPU::cbWritten counter if applicable.
6124 *
6125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6126 * @param fAccess The access being accounted for.
6127 * @param cbMem The access size.
6128 */
6129DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
6130{
6131 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6132 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6133 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
6134}
6135
6136
6137/**
6138 * Applies the segment limit, base and attributes.
6139 *
6140 * This may raise a \#GP or \#SS.
6141 *
6142 * @returns VBox strict status code.
6143 *
6144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6145 * @param fAccess The kind of access which is being performed.
6146 * @param iSegReg The index of the segment register to apply.
6147 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6148 * TSS, ++).
6149 * @param cbMem The access size.
6150 * @param pGCPtrMem Pointer to the guest memory address to apply
6151 * segmentation to. Input and output parameter.
6152 */
6153VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
6154{
6155 if (iSegReg == UINT8_MAX)
6156 return VINF_SUCCESS;
6157
6158 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6159 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
6160 switch (IEM_GET_CPU_MODE(pVCpu))
6161 {
6162 case IEMMODE_16BIT:
6163 case IEMMODE_32BIT:
6164 {
6165 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6166 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6167
6168 if ( pSel->Attr.n.u1Present
6169 && !pSel->Attr.n.u1Unusable)
6170 {
6171 Assert(pSel->Attr.n.u1DescType);
6172 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6173 {
6174 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6175 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6176 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
6177
6178 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
6179 {
6180 /** @todo CPL check. */
6181 }
6182
6183 /*
6184 * There are two kinds of data selectors, normal and expand down.
6185 */
6186 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6187 {
6188 if ( GCPtrFirst32 > pSel->u32Limit
6189 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6190 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
6191 }
6192 else
6193 {
6194 /*
6195 * The upper boundary is defined by the B bit, not the G bit!
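      * E.g. for an expand-down segment with limit 0x0fff and B=1, valid offsets
      * are 0x1000 thru 0xffffffff, so the check below faults an access at
      * 0x0800 while allowing one at 0x2000.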
6196 */
6197 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6198 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6199 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
6200 }
6201 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6202 }
6203 else
6204 {
6205 /*
6206 * Code selectors can usually be used to read through; writing is
6207 * only permitted in real and V8086 mode.
6208 */
6209 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6210 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6211 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6212 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
6213 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
6214
6215 if ( GCPtrFirst32 > pSel->u32Limit
6216 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6217 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
6218
6219 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
6220 {
6221 /** @todo CPL check. */
6222 }
6223
6224 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6225 }
6226 }
6227 else
6228 return iemRaiseGeneralProtectionFault0(pVCpu);
6229 return VINF_SUCCESS;
6230 }
6231
6232 case IEMMODE_64BIT:
6233 {
6234 RTGCPTR GCPtrMem = *pGCPtrMem;
6235 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6236 *pGCPtrMem = GCPtrMem + pSel->u64Base;
6237
6238 Assert(cbMem >= 1);
6239 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
6240 return VINF_SUCCESS;
6241 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
6242 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
6243 return iemRaiseGeneralProtectionFault0(pVCpu);
6244 }
6245
6246 default:
6247 AssertFailedReturn(VERR_IEM_IPE_7);
6248 }
6249}
6250
6251
6252/**
6253 * Translates a virtual address to a physical address and checks if we
6254 * can access the page as specified.
6255 *
6256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6257 * @param GCPtrMem The virtual address.
6258 * @param cbAccess The access size, for raising \#PF correctly for
6259 * FXSAVE and such.
6260 * @param fAccess The intended access.
6261 * @param pGCPhysMem Where to return the physical address.
6262 */
6263VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
6264 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
6265{
6266 /** @todo Need a different PGM interface here. We're currently using
6267 * generic / REM interfaces. This won't cut it for R0. */
6268 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
6269 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
6270 * here. */
6271 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6272 PGMPTWALKFAST WalkFast;
6273 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6274 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6275 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6276 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
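    /* Note: since X86_CR0_WP == PGMQPAGE_F_CR0_WP0 (asserted above), the XOR below
       sets PGMQPAGE_F_CR0_WP0 exactly when CR0.WP is clear. */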
6277 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6278 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6279 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6280 fQPage |= PGMQPAGE_F_USER_MODE;
6281 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6282 if (RT_SUCCESS(rc))
6283 {
6284 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6285
6286 /* If the page is writable and does not have the no-exec bit set, all
6287 access is allowed. Otherwise we'll have to check more carefully... */
6288 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
6289 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
6290 || (WalkFast.fEffective & X86_PTE_RW)
6291 || ( ( IEM_GET_CPL(pVCpu) != 3
6292 || (fAccess & IEM_ACCESS_WHAT_SYS))
6293 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
6294 && ( (WalkFast.fEffective & X86_PTE_US)
6295 || IEM_GET_CPL(pVCpu) != 3
6296 || (fAccess & IEM_ACCESS_WHAT_SYS) )
6297 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
6298 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
6299 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
6300 )
6301 );
6302
6303 /* PGMGstQueryPageFast sets the A & D bits. */
6304 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6305 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
6306
6307 *pGCPhysMem = WalkFast.GCPhys;
6308 return VINF_SUCCESS;
6309 }
6310
6311 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6312 /** @todo Check unassigned memory in unpaged mode. */
6313#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6314 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6315 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6316#endif
6317 *pGCPhysMem = NIL_RTGCPHYS;
6318 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
6319}
6320
6321#if 0 /*unused*/
6322/**
6323 * Looks up a memory mapping entry.
6324 *
6325 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6327 * @param pvMem The memory address.
6328 * @param fAccess The access type and purpose (IEM_ACCESS_XXX) to match.
6329 */
6330DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
6331{
6332 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6333 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6334 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
6335 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6336 return 0;
6337 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
6338 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6339 return 1;
6340 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
6341 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6342 return 2;
6343 return VERR_NOT_FOUND;
6344}
6345#endif
6346
6347/**
6348 * Finds a free memmap entry when using iNextMapping doesn't work.
6349 *
6350 * @returns Memory mapping index, 1024 on failure.
6351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6352 */
6353static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
6354{
6355 /*
6356 * The easy case.
6357 */
6358 if (pVCpu->iem.s.cActiveMappings == 0)
6359 {
6360 pVCpu->iem.s.iNextMapping = 1;
6361 return 0;
6362 }
6363
6364 /* There should be enough mappings for all instructions. */
6365 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
6366
6367 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
6368 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6369 return i;
6370
6371 AssertFailedReturn(1024);
6372}
6373
6374
6375/**
6376 * Commits a bounce buffer that needs writing back and unmaps it.
6377 *
6378 * @returns Strict VBox status code.
6379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6380 * @param iMemMap The index of the buffer to commit.
6381 * @param fPostponeFail Whether we can postpone write failures to ring-3.
6382 * Always false in ring-3, obviously.
6383 */
6384static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
6385{
6386 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6387 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6388#ifdef IN_RING3
6389 Assert(!fPostponeFail);
6390 RT_NOREF_PV(fPostponeFail);
6391#endif
6392
6393 /*
6394 * Do the writing.
6395 */
6396 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6397 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
6398 {
6399 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
6400 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6401 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6402 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6403 {
6404 /*
6405 * Carefully and efficiently dealing with access handler return
6406 * codes makes this a little bloated.
6407 */
6408 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6409 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6410 pbBuf,
6411 cbFirst,
6412 PGMACCESSORIGIN_IEM);
6413 if (rcStrict == VINF_SUCCESS)
6414 {
6415 if (cbSecond)
6416 {
6417 rcStrict = PGMPhysWrite(pVM,
6418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6419 pbBuf + cbFirst,
6420 cbSecond,
6421 PGMACCESSORIGIN_IEM);
6422 if (rcStrict == VINF_SUCCESS)
6423 { /* nothing */ }
6424 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6425 {
6426 LogEx(LOG_GROUP_IEM,
6427 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6429 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6430 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6431 }
6432#ifndef IN_RING3
6433 else if (fPostponeFail)
6434 {
6435 LogEx(LOG_GROUP_IEM,
6436 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6437 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6439 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6440 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6441 return iemSetPassUpStatus(pVCpu, rcStrict);
6442 }
6443#endif
6444 else
6445 {
6446 LogEx(LOG_GROUP_IEM,
6447 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6448 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6449 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6450 return rcStrict;
6451 }
6452 }
6453 }
6454 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6455 {
6456 if (!cbSecond)
6457 {
6458 LogEx(LOG_GROUP_IEM,
6459 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6460 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6461 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6462 }
6463 else
6464 {
6465 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6466 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6467 pbBuf + cbFirst,
6468 cbSecond,
6469 PGMACCESSORIGIN_IEM);
6470 if (rcStrict2 == VINF_SUCCESS)
6471 {
6472 LogEx(LOG_GROUP_IEM,
6473 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6474 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6475 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6476 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6477 }
6478 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6479 {
6480 LogEx(LOG_GROUP_IEM,
6481 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6484 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6485 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6486 }
6487#ifndef IN_RING3
6488 else if (fPostponeFail)
6489 {
6490 LogEx(LOG_GROUP_IEM,
6491 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6492 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6493 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6494 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6495 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6496 return iemSetPassUpStatus(pVCpu, rcStrict);
6497 }
6498#endif
6499 else
6500 {
6501 LogEx(LOG_GROUP_IEM,
6502 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6503 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6504 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6505 return rcStrict2;
6506 }
6507 }
6508 }
6509#ifndef IN_RING3
6510 else if (fPostponeFail)
6511 {
6512 LogEx(LOG_GROUP_IEM,
6513 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6514 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6515 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6516 if (!cbSecond)
6517 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6518 else
6519 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6520 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6521 return iemSetPassUpStatus(pVCpu, rcStrict);
6522 }
6523#endif
6524 else
6525 {
6526 LogEx(LOG_GROUP_IEM,
6527 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6528 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6529 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6530 return rcStrict;
6531 }
6532 }
6533 else
6534 {
6535 /*
6536 * No access handlers, much simpler.
6537 */
6538 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6539 if (RT_SUCCESS(rc))
6540 {
6541 if (cbSecond)
6542 {
6543 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6544 if (RT_SUCCESS(rc))
6545 { /* likely */ }
6546 else
6547 {
6548 LogEx(LOG_GROUP_IEM,
6549 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6550 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6551 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6552 return rc;
6553 }
6554 }
6555 }
6556 else
6557 {
6558 LogEx(LOG_GROUP_IEM,
6559 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6560 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6561 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6562 return rc;
6563 }
6564 }
6565 }
6566
6567#if defined(IEM_LOG_MEMORY_WRITES)
6568 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6569 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6570 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6571 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6572 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6573 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6574
6575 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6576 g_cbIemWrote = cbWrote;
6577 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6578#endif
6579
6580 /*
6581 * Free the mapping entry.
6582 */
6583 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6584 Assert(pVCpu->iem.s.cActiveMappings != 0);
6585 pVCpu->iem.s.cActiveMappings--;
6586 return VINF_SUCCESS;
6587}
6588
6589
6590/**
6591 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
6592 */
6593DECL_FORCE_INLINE(uint32_t)
6594iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
6595{
6596 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
6597 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6598 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6599 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6600}
6601
6602
6603/**
6604 * iemMemMap worker that deals with a request crossing pages.
6605 */
6606static VBOXSTRICTRC
6607iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6608 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6609{
6610 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
6611 Assert(cbMem <= GUEST_PAGE_SIZE);
6612
6613 /*
6614 * Do the address translations.
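     *
     * The request is split at the page boundary, e.g. an 8 byte access at
     * page offset 0xffc yields cbFirstPage=4 and cbSecondPage=4.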
6615 */
6616 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6617 RTGCPHYS GCPhysFirst;
6618 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6619 if (rcStrict != VINF_SUCCESS)
6620 return rcStrict;
6621 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6622
6623 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6624 RTGCPHYS GCPhysSecond;
6625 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6626 cbSecondPage, fAccess, &GCPhysSecond);
6627 if (rcStrict != VINF_SUCCESS)
6628 return rcStrict;
6629 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6630 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6631
6632 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6633
6634 /*
6635 * Check for data breakpoints.
6636 */
6637 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
6638 { /* likely */ }
6639 else
6640 {
6641 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
6642 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6643 cbSecondPage, fAccess);
6644 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6645 if (fDataBps > 1)
6646 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6647 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6648 }
6649
6650 /*
6651 * Read in the current memory content if it's a read, execute or partial
6652 * write access.
6653 */
6654 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6655
6656 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6657 {
6658 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6659 {
6660 /*
6661 * Must carefully deal with access handler status codes here,
6662 * which makes the code a bit bloated.
6663 */
6664 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6665 if (rcStrict == VINF_SUCCESS)
6666 {
6667 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6668 if (rcStrict == VINF_SUCCESS)
6669 { /*likely */ }
6670 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6671 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6672 else
6673 {
6674 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6675 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6676 return rcStrict;
6677 }
6678 }
6679 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6680 {
6681 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6682 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6683 {
6684 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6685 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6686 }
6687 else
6688 {
6689 LogEx(LOG_GROUP_IEM,
6690 ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6691 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6692 return rcStrict2;
6693 }
6694 }
6695 else
6696 {
6697 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6698 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6699 return rcStrict;
6700 }
6701 }
6702 else
6703 {
6704 /*
6705 * No informational status codes here, much more straightforward.
6706 */
6707 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6708 if (RT_SUCCESS(rc))
6709 {
6710 Assert(rc == VINF_SUCCESS);
6711 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6712 if (RT_SUCCESS(rc))
6713 Assert(rc == VINF_SUCCESS);
6714 else
6715 {
6716 LogEx(LOG_GROUP_IEM,
6717 ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6718 return rc;
6719 }
6720 }
6721 else
6722 {
6723 LogEx(LOG_GROUP_IEM,
6724 ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6725 return rc;
6726 }
6727 }
6728 }
6729#ifdef VBOX_STRICT
6730 else
6731 memset(pbBuf, 0xcc, cbMem);
6732 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6733 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6734#endif
6735 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6736
6737 /*
6738 * Commit the bounce buffer entry.
6739 */
6740 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6741 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6742 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6743 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6744 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6745 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6746 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6747 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6748 pVCpu->iem.s.cActiveMappings++;
6749
6750 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6751 *ppvMem = pbBuf;
6752 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6753 return VINF_SUCCESS;
6754}
6755
6756
6757/**
6758 * iemMemMap worker that deals with iemMemPageMap failures.
6759 */
6760static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6761 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6762{
6763 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
6764
6765 /*
6766 * Filter out conditions we can handle and the ones which shouldn't happen.
6767 */
6768 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6769 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6770 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6771 {
6772 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6773 return rcMap;
6774 }
6775 pVCpu->iem.s.cPotentialExits++;
6776
6777 /*
6778 * Read in the current memory content if it's a read, execute or partial
6779 * write access.
6780 */
6781 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6782 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6783 {
6784 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6785 memset(pbBuf, 0xff, cbMem);
6786 else
6787 {
6788 int rc;
6789 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6790 {
6791 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6792 if (rcStrict == VINF_SUCCESS)
6793 { /* nothing */ }
6794 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6795 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6796 else
6797 {
6798 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6799 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6800 return rcStrict;
6801 }
6802 }
6803 else
6804 {
6805 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6806 if (RT_SUCCESS(rc))
6807 { /* likely */ }
6808 else
6809 {
6810 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6811 GCPhysFirst, rc));
6812 return rc;
6813 }
6814 }
6815 }
6816 }
6817#ifdef VBOX_STRICT
6818 else
6819 memset(pbBuf, 0xcc, cbMem);
6822 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6823 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6824#endif
6825
6826 /*
6827 * Commit the bounce buffer entry.
6828 */
6829 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6830 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6831 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6832 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6833 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6834 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6835 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6836 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6837 pVCpu->iem.s.cActiveMappings++;
6838
6839 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6840 *ppvMem = pbBuf;
6841 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6842 return VINF_SUCCESS;
6843}
6844
6845
6846
6847/**
6848 * Maps the specified guest memory for the given kind of access.
6849 *
6850 * This may be using bounce buffering of the memory if it's crossing a page
6851 * boundary or if there is an access handler installed for any of it. Because
6852 * of lock prefix guarantees, we're in for some extra clutter when this
6853 * happens.
6854 *
6855 * This may raise a \#GP, \#SS, \#PF or \#AC.
6856 *
6857 * @returns VBox strict status code.
6858 *
6859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6860 * @param ppvMem Where to return the pointer to the mapped memory.
6861 * @param pbUnmapInfo Where to return unmap info to be passed to
6862 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6863 * done.
6864 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6865 * 8, 12, 16, 32 or 512. When used by string operations
6866 * it can be up to a page.
6867 * @param iSegReg The index of the segment register to use for this
6868 * access. The base and limits are checked. Use UINT8_MAX
6869 * to indicate that no segmentation is required (for IDT,
6870 * GDT and LDT accesses).
6871 * @param GCPtrMem The address of the guest memory.
6872 * @param fAccess How the memory is being accessed. The
6873 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6874 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6875 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6876 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6877 * set.
6878 * @param uAlignCtl Alignment control:
6879 * - Bits 15:0 is the alignment mask.
6880 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6881 * IEM_MEMMAP_F_ALIGN_SSE, and
6882 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6883 * Pass zero to skip alignment.
6884 */
6885VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6886 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6887{
6888 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);
6889
6890 /*
6891 * Check the input and figure out which mapping entry to use.
6892 */
6893 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6894 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6895 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6896 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6897 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6898
6899 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6900 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6901 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6902 {
6903 iMemMap = iemMemMapFindFree(pVCpu);
6904 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6905 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6906 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6907 pVCpu->iem.s.aMemMappings[2].fAccess),
6908 VERR_IEM_IPE_9);
6909 }
6910
6911 /*
6912 * Map the memory, checking that we can actually access it. If something
6913 * slightly complicated happens, fall back on bounce buffering.
6914 */
6915 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6916 if (rcStrict == VINF_SUCCESS)
6917 { /* likely */ }
6918 else
6919 return rcStrict;
6920
6921 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Not crossing a page boundary? */
6922 { /* likely */ }
6923 else
6924 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6925
6926 /*
6927 * Alignment check.
6928 */
6929 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6930 { /* likelyish */ }
6931 else
6932 {
6933 /* Misaligned access. */
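        /* Overview of the checks below (non-system accesses only):
             - If no IEM_MEMMAP_F_ALIGN_GP was requested, or the SSE variant was
               requested and MXCSR.MM is set: raise #AC when alignment checks are
               enabled, otherwise let the access proceed.
             - Otherwise raise #GP(0), except that IEM_MEMMAP_F_ALIGN_GP_OR_AC with
               a misalignment within 4 bytes and alignment checks enabled raises
               #AC instead. */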
6934 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6935 {
6936 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6937 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6938 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6939 {
6940 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6941
6942 if (!iemMemAreAlignmentChecksEnabled(pVCpu))
6943 { /* likely */ }
6944 else
6945 return iemRaiseAlignmentCheckException(pVCpu);
6946 }
6947 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6948 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6949 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6950 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6951 * that's what FXSAVE does on a 10980xe. */
6952 && iemMemAreAlignmentChecksEnabled(pVCpu))
6953 return iemRaiseAlignmentCheckException(pVCpu);
6954 else
6955 return iemRaiseGeneralProtectionFault0(pVCpu);
6956 }
6957
6958#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6959 /* If the access is atomic there are host platform alignment restrictions
6960 we need to conform with. */
6961 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6962# if defined(RT_ARCH_AMD64)
6963 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6964# elif defined(RT_ARCH_ARM64)
6965 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6966# else
6967# error port me
6968# endif
6969 )
6970 { /* okay */ }
6971 else
6972 {
6973 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6974 pVCpu->iem.s.cMisalignedAtomics += 1;
6975 return VINF_EM_EMULATE_SPLIT_LOCK;
6976 }
6977#endif
6978 }
6979
6980#ifdef IEM_WITH_DATA_TLB
6981 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6982
6983 /*
6984 * Get the TLB entry for this page and check PT flags.
6985 *
6986 * We reload the TLB entry if we need to set the dirty bit (accessed
6987 * should in theory always be set).
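     *
     * Each tag maps onto a pair of entries: the even one holds non-global pages
     * (matched against uTlbRevision) and the odd one global pages (matched
     * against uTlbRevisionGlobal), which is why the lookup below also tries
     * pTlbe + 1.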
6988 */
6989 uint8_t *pbMem = NULL;
6990 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
6991 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
6992 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
6993 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
6994 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
6995 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
6996 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
6997 {
6998# ifdef IEM_WITH_TLB_STATISTICS
6999 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
7000# endif
7001
7002 /* If the page is either supervisor only or non-writable, we need to do
7003 more careful access checks. */
7004 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
7005 {
7006 /* Write to read only memory? */
7007 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
7008 && (fAccess & IEM_ACCESS_TYPE_WRITE)
7009 && ( ( IEM_GET_CPL(pVCpu) == 3
7010 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7011 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
7012 {
7013 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7014 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7015 }
7016
7017 /* Kernel memory accessed by userland? */
7018 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
7019 && IEM_GET_CPL(pVCpu) == 3
7020 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7021 {
7022 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7023 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7024 }
7025 }
7026
7027 /* Look up the physical page info if necessary. */
7028 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7029# ifdef IN_RING3
7030 pbMem = pTlbe->pbMappingR3;
7031# else
7032 pbMem = NULL;
7033# endif
7034 else
7035 {
7036 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
7037 { /* likely */ }
7038 else
7039 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
7040 pTlbe->pbMappingR3 = NULL;
7041 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7042 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7043 &pbMem, &pTlbe->fFlagsAndPhysRev);
7044 AssertRCReturn(rc, rc);
7045# ifdef IN_RING3
7046 pTlbe->pbMappingR3 = pbMem;
7047# endif
7048 }
7049 }
7050 else
7051 {
7052 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
7053
7054 /* This page table walking will set A bits as required by the access while performing the walk.
7055 ASSUMES these are set when the address is translated rather than on commit... */
7056 /** @todo testcase: check when A bits are actually set by the CPU for code. */
7057 PGMPTWALKFAST WalkFast;
7058 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7059 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7060 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7061 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7062 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7063 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7064 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7065 fQPage |= PGMQPAGE_F_USER_MODE;
7066 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7067 if (RT_SUCCESS(rc))
7068 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7069 else
7070 {
7071 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7072# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7073 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
7074 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7075# endif
7076 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7077 }
7078
7079 uint32_t fDataBps;
7080 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7081 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7082 {
7083 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7084 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7085 {
7086 pTlbe--;
7087 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7088 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7089 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7090# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
7091 else
7092 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
7093# endif
7094 }
7095 else
7096 {
7097 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7098 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7099 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7100 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7101# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
7102 else
7103 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
7104# endif
7105 }
7106 }
7107 else
7108 {
7109 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7110 to the page with the data access breakpoint armed on it to pass thru here. */
7111 if (fDataBps > 1)
7112 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7113 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7114 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7115 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7116 pTlbe->uTag = uTagNoRev;
7117 }
7118 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
7119 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
7120 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7121 pTlbe->GCPhys = GCPhysPg;
7122 pTlbe->pbMappingR3 = NULL;
7123 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
7124 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
7125 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
7126 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
7127 || IEM_GET_CPL(pVCpu) != 3
7128 || (fAccess & IEM_ACCESS_WHAT_SYS));
7129
7130 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
7131 {
7132 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
7133 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
7134 else
7135 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
7136 }
7137
7138 /* Resolve the physical address. */
7139 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7140 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7141 &pbMem, &pTlbe->fFlagsAndPhysRev);
7142 AssertRCReturn(rc, rc);
7143# ifdef IN_RING3
7144 pTlbe->pbMappingR3 = pbMem;
7145# endif
7146 }
7147
7148 /*
7149 * Check the physical page level access and mapping.
7150 */
7151 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
7152 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
7153 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
7154 { /* probably likely */ }
7155 else
7156 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
7157 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7158 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7159 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7160 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7161 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7162
7163 if (pbMem)
7164 {
7165 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7166 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7167 fAccess |= IEM_ACCESS_NOT_LOCKED;
7168 }
7169 else
7170 {
7171 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7172 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7173 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7174 if (rcStrict != VINF_SUCCESS)
7175 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7176 }
7177
7178 void * const pvMem = pbMem;
7179
7180 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7181 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7182 if (fAccess & IEM_ACCESS_TYPE_READ)
7183 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7184
7185#else /* !IEM_WITH_DATA_TLB */
7186
7187 RTGCPHYS GCPhysFirst;
7188 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7189 if (rcStrict != VINF_SUCCESS)
7190 return rcStrict;
7191
7192 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7193 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7194 if (fAccess & IEM_ACCESS_TYPE_READ)
7195 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7196
7197 void *pvMem;
7198 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7199 if (rcStrict != VINF_SUCCESS)
7200 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7201
7202#endif /* !IEM_WITH_DATA_TLB */
7203
7204 /*
7205 * Fill in the mapping table entry.
7206 */
7207 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7208 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7209 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7210 pVCpu->iem.s.cActiveMappings += 1;
7211
7212 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
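    /* The unmap info byte encodes the mapping index in bits 2:0, an always-set
       marker bit 3 (0x08), and the IEM_ACCESS_TYPE_MASK part of fAccess in bits
       7:4; iemMemCommitAndUnmap and iemMemRollbackAndUnmap use it to locate and
       validate the mapping entry. */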
7213 *ppvMem = pvMem;
7214 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7215 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
7216 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
7217
7218 return VINF_SUCCESS;
7219}
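
/*
 * Illustrative sketch only (not built): a typical 4 byte data read through DS
 * could use iemMemMap together with iemMemCommitAndUnmap (below) roughly like
 * this, passing the natural alignment mask and no IEM_MEMMAP_F_ALIGN_XXX
 * flags.  The locals pu32Src and GCPtrMem are made up for the example, and a
 * non-VINF_SUCCESS status must be propagated by the caller as usual.
 *
 *     uint32_t    *pu32Src    = NULL;
 *     uint8_t      bUnmapInfo = 0;
 *     VBOXSTRICTRC rcStrict   = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src),
 *                                         X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint32_t const uValue = *pu32Src;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *     }
 */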
7220
7221
7222/**
7223 * Commits the guest memory if bounce buffered and unmaps it.
7224 *
7225 * @returns Strict VBox status code.
7226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7227 * @param bUnmapInfo Unmap info set by iemMemMap.
7228 */
7229VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7230{
7231 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7232 AssertMsgReturn( (bUnmapInfo & 0x08)
7233 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7234 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
7235 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7236 VERR_NOT_FOUND);
7237
7238 /* If it's bounce buffered, we may need to write back the buffer. */
7239 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7240 {
7241 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7242 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7243 }
7244 /* Otherwise unlock it. */
7245 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7246 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7247
7248 /* Free the entry. */
7249 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7250 Assert(pVCpu->iem.s.cActiveMappings != 0);
7251 pVCpu->iem.s.cActiveMappings--;
7252 return VINF_SUCCESS;
7253}
7254
7255
7256/**
7257 * Rolls back the guest memory (conceptually only) and unmaps it.
7258 *
7259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7260 * @param bUnmapInfo Unmap info set by iemMemMap.
7261 */
7262void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7263{
7264 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7265 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7266 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7267 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7268 == ((unsigned)bUnmapInfo >> 4),
7269 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7270
7271 /* Unlock it if necessary. */
7272 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7273 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7274
7275 /* Free the entry. */
7276 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7277 Assert(pVCpu->iem.s.cActiveMappings != 0);
7278 pVCpu->iem.s.cActiveMappings--;
7279}
7280
7281#ifdef IEM_WITH_SETJMP
7282
7283/**
7284 * Maps the specified guest memory for the given kind of access, longjmp on
7285 * error.
7286 *
7287 * This may be using bounce buffering of the memory if it's crossing a page
7288 * boundary or if there is an access handler installed for any of it. Because
7289 * of lock prefix guarantees, we're in for some extra clutter when this
7290 * happens.
7291 *
7292 * This may raise a \#GP, \#SS, \#PF or \#AC.
7293 *
7294 * @returns Pointer to the mapped memory.
7295 *
7296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7297 * @param bUnmapInfo Where to return unmap info to be passed to
7298 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
7299 * iemMemCommitAndUnmapWoSafeJmp,
7300 * iemMemCommitAndUnmapRoSafeJmp,
7301 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
7302 * when done.
7303 * @param cbMem The number of bytes to map. This is usually 1,
7304 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7305 * string operations it can be up to a page.
7306 * @param iSegReg The index of the segment register to use for
7307 * this access. The base and limits are checked.
7308 * Use UINT8_MAX to indicate that no segmentation
7309 * is required (for IDT, GDT and LDT accesses).
7310 * @param GCPtrMem The address of the guest memory.
7311 * @param fAccess How the memory is being accessed. The
7312 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
7313 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
7314 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
7315 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
7316 * set.
7317 * @param uAlignCtl Alignment control:
7318 * - Bits 15:0 is the alignment mask.
7319 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
7320 * IEM_MEMMAP_F_ALIGN_SSE, and
7321 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
7322 * Pass zero to skip alignment.
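 *                          For example, the SSE aligned 128-bit store helpers further
 *                          down pass (sizeof(RTUINT128U) - 1) | IEM_MEMMAP_F_ALIGN_GP
 *                          | IEM_MEMMAP_F_ALIGN_SSE here.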
7323 * @tparam  a_fSafeCall Whether this is a call from a "safe" fallback function in
7324 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
7325 * needs counting as such in the statistics.
7326 */
7327template<bool a_fSafeCall = false>
7328static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7329 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7330{
7331 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
7332
7333 /*
7334 * Check the input, check segment access and adjust address
7335 * with segment base.
7336 */
7337 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7338 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
7339 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7340
7341 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7342 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7343 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7344
7345 /*
7346 * Alignment check.
7347 */
7348 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
7349 { /* likelyish */ }
7350 else
7351 {
7352 /* Misaligned access. */
7353 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7354 {
7355 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
7356 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
7357 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
7358 {
7359 AssertCompile(X86_CR0_AM == X86_EFL_AC);
7360
7361 if (iemMemAreAlignmentChecksEnabled(pVCpu))
7362 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7363 }
7364 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
7365 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
7366 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
7367 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
7368 * that's what FXSAVE does on a 10980xe. */
7369 && iemMemAreAlignmentChecksEnabled(pVCpu))
7370 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7371 else
7372 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
7373 }
7374
7375#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
7376 /* If the access is atomic there are host platform alignment restrictions
7377 we need to conform to. */
7378 if ( !(fAccess & IEM_ACCESS_ATOMIC)
7379# if defined(RT_ARCH_AMD64)
7380 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
7381# elif defined(RT_ARCH_ARM64)
7382 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
7383# else
7384# error port me
7385# endif
7386 )
7387 { /* okay */ }
7388 else
7389 {
7390 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
7391 pVCpu->iem.s.cMisalignedAtomics += 1;
7392 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
7393 }
7394#endif
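        /* Worked example for the AMD64 check above: an 8 byte atomic access at an
           address with (GCPtrMem & 63) == 60 leaves only 64 - 60 = 4 bytes in the
           cache line, so it takes the VINF_EM_EMULATE_SPLIT_LOCK fallback. */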
7395 }
7396
7397 /*
7398 * Figure out which mapping entry to use.
7399 */
7400 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
7401 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7402 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7403 {
7404 iMemMap = iemMemMapFindFree(pVCpu);
7405 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
7406 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
7407 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
7408 pVCpu->iem.s.aMemMappings[2].fAccess),
7409 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
7410 }
7411
7412 /*
7413 * Crossing a page boundary?
7414 */
7415 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
7416 { /* No (likely). */ }
7417 else
7418 {
7419 void *pvMem;
7420 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
7421 if (rcStrict == VINF_SUCCESS)
7422 return pvMem;
7423 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7424 }
7425
7426#ifdef IEM_WITH_DATA_TLB
7427 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
7428
7429 /*
7430 * Get the TLB entry for this page checking that it has the A & D bits
7431 * set as per fAccess flags.
7432 */
7433 /** @todo make the caller pass these in with fAccess. */
7434 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
7435 ? IEMTLBE_F_PT_NO_USER : 0;
7436 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
7437 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
7438 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
7439 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7440 ? IEMTLBE_F_PT_NO_WRITE : 0)
7441 : 0;
7442 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
7443 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
7444 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
7445 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
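    /* Note: the data TLB keeps entries in even/odd pairs; the even entry is tagged
       with the non-global revision and the odd one with the global revision, which
       is why the lookup below checks pTlbe and then pTlbe + 1. */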
7446 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
7447 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
7448 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
7449 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
7450 {
7451# ifdef IEM_WITH_TLB_STATISTICS
7452 if (a_fSafeCall)
7453 pVCpu->iem.s.DataTlb.cTlbSafeHits++;
7454 else
7455 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
7456# endif
7457 }
7458 else
7459 {
7460 if (a_fSafeCall)
7461 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
7462 else
7463 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
7464
7465 /* This page table walking will set A and D bits as required by the
7466 access while performing the walk.
7467 ASSUMES these are set when the address is translated rather than on commit... */
7468 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7469 PGMPTWALKFAST WalkFast;
7470 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7471 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7472 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7473 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7474 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7475 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7476 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7477 fQPage |= PGMQPAGE_F_USER_MODE;
7478 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7479 if (RT_SUCCESS(rc))
7480 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7481 else
7482 {
7483 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7484# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7485 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
7486 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7487# endif
7488 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7489 }
7490
7491 uint32_t fDataBps;
7492 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7493 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7494 {
7495 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7496 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7497 {
7498 pTlbe--;
7499 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7500 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7501 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7502# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
7503 else
7504 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
7505# endif
7506 }
7507 else
7508 {
7509 if (a_fSafeCall)
7510 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
7511 else
7512 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7513 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7514 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7515 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7516# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
7517 else
7518 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
7519# endif
7520 }
7521 }
7522 else
7523 {
7524 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7525 to the page with the data access breakpoint armed on it to pass thru here. */
7526 if (fDataBps > 1)
7527 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7528 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7529 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7530 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7531 pTlbe->uTag = uTagNoRev;
7532 }
7533 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
7534 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
7535 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7536 pTlbe->GCPhys = GCPhysPg;
7537 pTlbe->pbMappingR3 = NULL;
7538 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7539 Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
7540 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
7541
7542 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
7543 {
7544 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
7545 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
7546 else
7547 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
7548 }
7549
7550 /* Resolve the physical address. */
7551 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7552 uint8_t *pbMemFullLoad = NULL;
7553 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7554 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
7555 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7556# ifdef IN_RING3
7557 pTlbe->pbMappingR3 = pbMemFullLoad;
7558# endif
7559 }
7560
7561 /*
7562 * Check the flags and physical revision.
7563 * Note! This will revalidate the uTlbPhysRev after a full load. This is
7564 * just to keep the code structure simple (i.e. avoid gotos or similar).
7565 */
7566 uint8_t *pbMem;
7567 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
7568 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7569# ifdef IN_RING3
7570 pbMem = pTlbe->pbMappingR3;
7571# else
7572 pbMem = NULL;
7573# endif
7574 else
7575 {
7576 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7577
7578 /*
7579 * Okay, something isn't quite right or needs refreshing.
7580 */
7581 /* Write to read only memory? */
7582 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
7583 {
7584 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7585# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7586/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
7587 * to trigger an \#PG or a VM nested paging exit here yet! */
7588 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7589 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7590# endif
7591 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7592 }
7593
7594 /* Kernel memory accessed by userland? */
7595 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
7596 {
7597 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7598# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7599/** @todo TLB: See above. */
7600 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7601 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7602# endif
7603 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7604 }
7605
7606 /*
7607 * Check if the physical page info needs updating.
7608 */
7609 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7610# ifdef IN_RING3
7611 pbMem = pTlbe->pbMappingR3;
7612# else
7613 pbMem = NULL;
7614# endif
7615 else
7616 {
7617 pTlbe->pbMappingR3 = NULL;
7618 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7619 pbMem = NULL;
7620 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7621 &pbMem, &pTlbe->fFlagsAndPhysRev);
7622 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7623# ifdef IN_RING3
7624 pTlbe->pbMappingR3 = pbMem;
7625# endif
7626 }
7627
7628 /*
7629 * Check the physical page level access and mapping.
7630 */
7631 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
7632 { /* probably likely */ }
7633 else
7634 {
7635 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
7636 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7637 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7638 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7639 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7640 if (rcStrict == VINF_SUCCESS)
7641 return pbMem;
7642 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7643 }
7644 }
7645 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7646
7647 if (pbMem)
7648 {
7649 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7650 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7651 fAccess |= IEM_ACCESS_NOT_LOCKED;
7652 }
7653 else
7654 {
7655 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7656 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7657 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7658 if (rcStrict == VINF_SUCCESS)
7659 {
7660 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7661 return pbMem;
7662 }
7663 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7664 }
7665
7666 void * const pvMem = pbMem;
7667
7668 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7669 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7670 if (fAccess & IEM_ACCESS_TYPE_READ)
7671 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7672
7673#else /* !IEM_WITH_DATA_TLB */
7674
7675
7676 RTGCPHYS GCPhysFirst;
7677 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7678 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7679 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7680
7681 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7682 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7683 if (fAccess & IEM_ACCESS_TYPE_READ)
7684 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7685
7686 void *pvMem;
7687 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7688 if (rcStrict == VINF_SUCCESS)
7689 { /* likely */ }
7690 else
7691 {
7692 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7693 if (rcStrict == VINF_SUCCESS)
7694 return pvMem;
7695 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7696 }
7697
7698#endif /* !IEM_WITH_DATA_TLB */
7699
7700 /*
7701 * Fill in the mapping table entry.
7702 */
7703 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7704 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7705 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7706 pVCpu->iem.s.cActiveMappings++;
7707
7708 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
7709
7710 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7711 return pvMem;
7712}
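
/* Illustrative only, not part of the build: a minimal sketch of how the longjmp
   variant above is used, modelled on iemMemStoreDataU128AlignedSseJmp further down.
   On any failure iemMemMapJmp longjmps, so the caller does no status checking.

    uint8_t     bUnmapInfo;
    PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst),
                                                     iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
                                                     (sizeof(*pu128Dst) - 1)
                                                     | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    pu128Dst->au64[0] = u128Value.au64[0];
    pu128Dst->au64[1] = u128Value.au64[1];
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
*/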
7713
7714
7715/** @see iemMemMapJmp */
7716static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7717 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7718{
7719 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
7720}
7721
7722
7723/**
7724 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
7725 *
7726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7727 * @param   bUnmapInfo  Unmap info set by iemMemMapJmp.
7729 */
7730void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7731{
7732 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7733 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7734 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7735 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7736 == ((unsigned)bUnmapInfo >> 4),
7737 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7738
7739 /* If it's bounce buffered, we may need to write back the buffer. */
7740 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7741 {
7742 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7743 {
7744 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7745 if (rcStrict == VINF_SUCCESS)
7746 return;
7747 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7748 }
7749 }
7750 /* Otherwise unlock it. */
7751 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7752 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7753
7754 /* Free the entry. */
7755 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7756 Assert(pVCpu->iem.s.cActiveMappings != 0);
7757 pVCpu->iem.s.cActiveMappings--;
7758}
7759
7760
7761/** Fallback for iemMemCommitAndUnmapRwJmp. */
7762void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7763{
7764 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7765 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7766}
7767
7768
7769/** Fallback for iemMemCommitAndUnmapAtJmp. */
7770void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7771{
7772 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7773 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7774}
7775
7776
7777/** Fallback for iemMemCommitAndUnmapWoJmp. */
7778void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7779{
7780 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7781 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7782}
7783
7784
7785/** Fallback for iemMemCommitAndUnmapRoJmp. */
7786void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7787{
7788 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7789 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7790}
7791
7792
7793/** Fallback for iemMemRollbackAndUnmapWo. */
7794void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7795{
7796 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7797 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7798}
7799
7800#endif /* IEM_WITH_SETJMP */
7801
7802#ifndef IN_RING3
7803/**
7804 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7805 * buffer part shows trouble, it will be postponed to ring-3 (sets FF and stuff).
7806 *
7807 * Allows the instruction to be completed and retired, while the IEM user will
7808 * return to ring-3 immediately afterwards and do the postponed writes there.
7809 *
7810 * @returns VBox status code (no strict statuses). Caller must check
7811 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7813 * @param   bUnmapInfo  Unmap info set by iemMemMap.
7815 */
7816VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7817{
7818 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7819 AssertMsgReturn( (bUnmapInfo & 0x08)
7820 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7821 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7822 == ((unsigned)bUnmapInfo >> 4),
7823 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7824 VERR_NOT_FOUND);
7825
7826 /* If it's bounce buffered, we may need to write back the buffer. */
7827 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7828 {
7829 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7830 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7831 }
7832 /* Otherwise unlock it. */
7833 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7834 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7835
7836 /* Free the entry. */
7837 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7838 Assert(pVCpu->iem.s.cActiveMappings != 0);
7839 pVCpu->iem.s.cActiveMappings--;
7840 return VINF_SUCCESS;
7841}
7842#endif
7843
7844
7845/**
7846 * Rolls back mappings, releasing page locks and such.
7847 *
7848 * The caller shall only call this after checking cActiveMappings.
7849 *
7850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7851 */
7852void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7853{
7854 Assert(pVCpu->iem.s.cActiveMappings > 0);
7855
7856 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7857 while (iMemMap-- > 0)
7858 {
7859 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7860 if (fAccess != IEM_ACCESS_INVALID)
7861 {
7862 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7863 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7864 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7865 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7866 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7867 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7868 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7869 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7870 pVCpu->iem.s.cActiveMappings--;
7871 }
7872 }
7873}
7874
7875
7876/*
7877 * Instantiate R/W templates.
7878 */
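/* Each inclusion of IEMAllMemRWTmpl.cpp.h below instantiates the fetch/store helpers
   for the given type and suffix, e.g. the U16/U32/U64 instantiations provide the
   iemMemFetchDataU16/U32/U64 and iemMemStoreDataU16/U32/U64 helpers used by the
   descriptor register code further down; TMPL_MEM_WITH_STACK additionally requests
   the stack (push/pop) variants. */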
7879#define TMPL_MEM_WITH_STACK
7880
7881#define TMPL_MEM_TYPE uint8_t
7882#define TMPL_MEM_FN_SUFF U8
7883#define TMPL_MEM_FMT_TYPE "%#04x"
7884#define TMPL_MEM_FMT_DESC "byte"
7885#include "IEMAllMemRWTmpl.cpp.h"
7886
7887#define TMPL_MEM_TYPE uint16_t
7888#define TMPL_MEM_FN_SUFF U16
7889#define TMPL_MEM_FMT_TYPE "%#06x"
7890#define TMPL_MEM_FMT_DESC "word"
7891#include "IEMAllMemRWTmpl.cpp.h"
7892
7893#define TMPL_WITH_PUSH_SREG
7894#define TMPL_MEM_TYPE uint32_t
7895#define TMPL_MEM_FN_SUFF U32
7896#define TMPL_MEM_FMT_TYPE "%#010x"
7897#define TMPL_MEM_FMT_DESC "dword"
7898#include "IEMAllMemRWTmpl.cpp.h"
7899#undef TMPL_WITH_PUSH_SREG
7900
7901#define TMPL_MEM_TYPE uint64_t
7902#define TMPL_MEM_FN_SUFF U64
7903#define TMPL_MEM_FMT_TYPE "%#018RX64"
7904#define TMPL_MEM_FMT_DESC "qword"
7905#include "IEMAllMemRWTmpl.cpp.h"
7906
7907#undef TMPL_MEM_WITH_STACK
7908
7909#define TMPL_MEM_TYPE uint32_t
7910#define TMPL_MEM_TYPE_ALIGN 0
7911#define TMPL_MEM_FN_SUFF U32NoAc
7912#define TMPL_MEM_FMT_TYPE "%#010x"
7913#define TMPL_MEM_FMT_DESC "dword"
7914#include "IEMAllMemRWTmpl.cpp.h"
7915#undef TMPL_WITH_PUSH_SREG
7916
7917#define TMPL_MEM_TYPE uint64_t
7918#define TMPL_MEM_TYPE_ALIGN 0
7919#define TMPL_MEM_FN_SUFF U64NoAc
7920#define TMPL_MEM_FMT_TYPE "%#018RX64"
7921#define TMPL_MEM_FMT_DESC "qword"
7922#include "IEMAllMemRWTmpl.cpp.h"
7923
7924#define TMPL_MEM_TYPE uint64_t
7925#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7926#define TMPL_MEM_FN_SUFF U64AlignedU128
7927#define TMPL_MEM_FMT_TYPE "%#018RX64"
7928#define TMPL_MEM_FMT_DESC "qword"
7929#include "IEMAllMemRWTmpl.cpp.h"
7930
7931/* See IEMAllMemRWTmplInline.cpp.h */
7932#define TMPL_MEM_BY_REF
7933
7934#define TMPL_MEM_TYPE RTFLOAT80U
7935#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7936#define TMPL_MEM_FN_SUFF R80
7937#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7938#define TMPL_MEM_FMT_DESC "tword"
7939#include "IEMAllMemRWTmpl.cpp.h"
7940
7941#define TMPL_MEM_TYPE RTPBCD80U
7942#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7943#define TMPL_MEM_FN_SUFF D80
7944#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7945#define TMPL_MEM_FMT_DESC "tword"
7946#include "IEMAllMemRWTmpl.cpp.h"
7947
7948#define TMPL_MEM_TYPE RTUINT128U
7949#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7950#define TMPL_MEM_FN_SUFF U128
7951#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7952#define TMPL_MEM_FMT_DESC "dqword"
7953#include "IEMAllMemRWTmpl.cpp.h"
7954
7955#define TMPL_MEM_TYPE RTUINT128U
7956#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7957#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7958#define TMPL_MEM_FN_SUFF U128AlignedSse
7959#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7960#define TMPL_MEM_FMT_DESC "dqword"
7961#include "IEMAllMemRWTmpl.cpp.h"
7962
7963#define TMPL_MEM_TYPE RTUINT128U
7964#define TMPL_MEM_TYPE_ALIGN 0
7965#define TMPL_MEM_FN_SUFF U128NoAc
7966#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7967#define TMPL_MEM_FMT_DESC "dqword"
7968#include "IEMAllMemRWTmpl.cpp.h"
7969
7970#define TMPL_MEM_TYPE RTUINT256U
7971#define TMPL_MEM_TYPE_ALIGN 0
7972#define TMPL_MEM_FN_SUFF U256NoAc
7973#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7974#define TMPL_MEM_FMT_DESC "qqword"
7975#include "IEMAllMemRWTmpl.cpp.h"
7976
7977#define TMPL_MEM_TYPE RTUINT256U
7978#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7979#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7980#define TMPL_MEM_FN_SUFF U256AlignedAvx
7981#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7982#define TMPL_MEM_FMT_DESC "qqword"
7983#include "IEMAllMemRWTmpl.cpp.h"
7984
7985/**
7986 * Fetches a data dword and zero extends it to a qword.
7987 *
7988 * @returns Strict VBox status code.
7989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7990 * @param pu64Dst Where to return the qword.
7991 * @param iSegReg The index of the segment register to use for
7992 * this access. The base and limits are checked.
7993 * @param GCPtrMem The address of the guest memory.
7994 */
7995VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7996{
7997 /* The lazy approach for now... */
7998 uint8_t bUnmapInfo;
7999 uint32_t const *pu32Src;
8000 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
8001 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
8002 if (rc == VINF_SUCCESS)
8003 {
8004 *pu64Dst = *pu32Src;
8005 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8006 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
8007 }
8008 return rc;
8009}
8010
8011
8012#ifdef SOME_UNUSED_FUNCTION
8013/**
8014 * Fetches a data dword and sign extends it to a qword.
8015 *
8016 * @returns Strict VBox status code.
8017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8018 * @param pu64Dst Where to return the sign extended value.
8019 * @param iSegReg The index of the segment register to use for
8020 * this access. The base and limits are checked.
8021 * @param GCPtrMem The address of the guest memory.
8022 */
8023VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8024{
8025 /* The lazy approach for now... */
8026 uint8_t bUnmapInfo;
8027 int32_t const *pi32Src;
8028 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
8029 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
8030 if (rc == VINF_SUCCESS)
8031 {
8032 *pu64Dst = *pi32Src;
8033 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8034 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
8035 }
8036#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8037 else
8038 *pu64Dst = 0;
8039#endif
8040 return rc;
8041}
8042#endif
8043
8044
8045/**
8046 * Fetches a descriptor register (lgdt, lidt).
8047 *
8048 * @returns Strict VBox status code.
8049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8050 * @param pcbLimit Where to return the limit.
8051 * @param pGCPtrBase Where to return the base.
8052 * @param iSegReg The index of the segment register to use for
8053 * this access. The base and limits are checked.
8054 * @param GCPtrMem The address of the guest memory.
8055 * @param enmOpSize The effective operand size.
8056 */
8057VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
8058 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
8059{
8060 /*
8061 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
8062 * little special:
8063 * - The two reads are done separately.
8064 *        - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
8065 * - We suspect the 386 to actually commit the limit before the base in
8066 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
8067 *          don't try to emulate this eccentric behavior, because it's not well
8068 * enough understood and rather hard to trigger.
8069 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
8070 */
8071 VBOXSTRICTRC rcStrict;
8072 if (IEM_IS_64BIT_CODE(pVCpu))
8073 {
8074 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
8075 if (rcStrict == VINF_SUCCESS)
8076 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
8077 }
8078 else
8079 {
8080 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
8081 if (enmOpSize == IEMMODE_32BIT)
8082 {
8083 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
8084 {
8085 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
8086 if (rcStrict == VINF_SUCCESS)
8087 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
8088 }
8089 else
8090 {
8091 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
8092 if (rcStrict == VINF_SUCCESS)
8093 {
8094 *pcbLimit = (uint16_t)uTmp;
8095 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
8096 }
8097 }
8098 if (rcStrict == VINF_SUCCESS)
8099 *pGCPtrBase = uTmp;
8100 }
8101 else
8102 {
8103 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
8104 if (rcStrict == VINF_SUCCESS)
8105 {
8106 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
8107 if (rcStrict == VINF_SUCCESS)
8108 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
8109 }
8110 }
8111 }
8112 return rcStrict;
8113}
8114
8115
8116/**
8117 * Stores a data dqword, SSE aligned.
8118 *
8119 * @returns Strict VBox status code.
8120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8121 * @param iSegReg The index of the segment register to use for
8122 * this access. The base and limits are checked.
8123 * @param GCPtrMem The address of the guest memory.
8124 * @param u128Value The value to store.
8125 */
8126VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
8127{
8128 /* The lazy approach for now... */
8129 uint8_t bUnmapInfo;
8130 PRTUINT128U pu128Dst;
8131 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
8132 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
8133 if (rc == VINF_SUCCESS)
8134 {
8135 pu128Dst->au64[0] = u128Value.au64[0];
8136 pu128Dst->au64[1] = u128Value.au64[1];
8137 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8138 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
8139 }
8140 return rc;
8141}
8142
8143
8144#ifdef IEM_WITH_SETJMP
8145/**
8146 * Stores a data dqword, SSE aligned.
8147 *
8148 * @returns Strict VBox status code.
8149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8150 * @param iSegReg The index of the segment register to use for
8151 * this access. The base and limits are checked.
8152 * @param GCPtrMem The address of the guest memory.
8153 * @param u128Value The value to store.
8154 */
8155void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
8156 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
8157{
8158 /* The lazy approach for now... */
8159 uint8_t bUnmapInfo;
8160 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
8161 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
8162 pu128Dst->au64[0] = u128Value.au64[0];
8163 pu128Dst->au64[1] = u128Value.au64[1];
8164 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
8165 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
8166}
8167#endif
8168
8169
8170/**
8171 * Stores a data qqword.
8172 *
8173 * @returns Strict VBox status code.
8174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8175 * @param iSegReg The index of the segment register to use for
8176 * this access. The base and limits are checked.
8177 * @param GCPtrMem The address of the guest memory.
8178 * @param pu256Value Pointer to the value to store.
8179 */
8180VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
8181{
8182 /* The lazy approach for now... */
8183 uint8_t bUnmapInfo;
8184 PRTUINT256U pu256Dst;
8185 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8186 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
8187 if (rc == VINF_SUCCESS)
8188 {
8189 pu256Dst->au64[0] = pu256Value->au64[0];
8190 pu256Dst->au64[1] = pu256Value->au64[1];
8191 pu256Dst->au64[2] = pu256Value->au64[2];
8192 pu256Dst->au64[3] = pu256Value->au64[3];
8193 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8194 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8195 }
8196 return rc;
8197}
8198
8199
8200#ifdef IEM_WITH_SETJMP
8201/**
8202 * Stores a data qqword, longjmp on error.
8203 *
8204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8205 * @param iSegReg The index of the segment register to use for
8206 * this access. The base and limits are checked.
8207 * @param GCPtrMem The address of the guest memory.
8208 * @param pu256Value Pointer to the value to store.
8209 */
8210void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8211{
8212 /* The lazy approach for now... */
8213 uint8_t bUnmapInfo;
8214 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8215 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
8216 pu256Dst->au64[0] = pu256Value->au64[0];
8217 pu256Dst->au64[1] = pu256Value->au64[1];
8218 pu256Dst->au64[2] = pu256Value->au64[2];
8219 pu256Dst->au64[3] = pu256Value->au64[3];
8220 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
8221 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8222}
8223#endif
8224
8225
8226/**
8227 * Stores a descriptor register (sgdt, sidt).
8228 *
8229 * @returns Strict VBox status code.
8230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8231 * @param cbLimit The limit.
8232 * @param GCPtrBase The base address.
8233 * @param iSegReg The index of the segment register to use for
8234 * this access. The base and limits are checked.
8235 * @param GCPtrMem The address of the guest memory.
8236 */
8237VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8238{
8239 /*
8240 * The SIDT and SGDT instructions actually store the data using two
8241 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
8242 * do not respond to opsize prefixes.
8243 */
8244 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8245 if (rcStrict == VINF_SUCCESS)
8246 {
8247 if (IEM_IS_16BIT_CODE(pVCpu))
8248 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8249 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8250 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8251 else if (IEM_IS_32BIT_CODE(pVCpu))
8252 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8253 else
8254 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8255 }
8256 return rcStrict;
8257}
8258
8259
8260/**
8261 * Begin a special stack push (used by interrupt, exceptions and such).
8262 *
8263 * This will raise \#SS or \#PF if appropriate.
8264 *
8265 * @returns Strict VBox status code.
8266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8267 * @param cbMem The number of bytes to push onto the stack.
8268 * @param cbAlign The alignment mask (7, 3, 1).
8269 * @param ppvMem Where to return the pointer to the stack memory.
8270 * As with the other memory functions this could be
8271 * direct access or bounce buffered access, so
8272 *                      don't commit registers until the commit call
8273 * succeeds.
8274 * @param pbUnmapInfo Where to store unmap info for
8275 * iemMemStackPushCommitSpecial.
8276 * @param puNewRsp Where to return the new RSP value. This must be
8277 * passed unchanged to
8278 * iemMemStackPushCommitSpecial().
8279 */
8280VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8281 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
8282{
8283 Assert(cbMem < UINT8_MAX);
8284 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8285 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
8286}
8287
8288
8289/**
8290 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8291 *
8292 * This will update the rSP.
8293 *
8294 * @returns Strict VBox status code.
8295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8296 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
8297 * @param uNewRsp The new RSP value returned by
8298 * iemMemStackPushBeginSpecial().
8299 */
8300VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
8301{
8302 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8303 if (rcStrict == VINF_SUCCESS)
8304 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8305 return rcStrict;
8306}
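
/* Illustrative only, not part of the build: the begin/commit protocol for a special
   stack push, here for a hypothetical 8 byte value uValue with a qword alignment
   mask of 7.

    void    *pvMem;
    uint8_t  bUnmapInfo;
    uint64_t uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvMem, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *(uint64_t *)pvMem = uValue;
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
    }
*/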
8307
8308
8309/**
8310 * Begin a special stack pop (used by iret, retf and such).
8311 *
8312 * This will raise \#SS or \#PF if appropriate.
8313 *
8314 * @returns Strict VBox status code.
8315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8316 * @param cbMem The number of bytes to pop from the stack.
8317 * @param cbAlign The alignment mask (7, 3, 1).
8318 * @param ppvMem Where to return the pointer to the stack memory.
8319 * @param pbUnmapInfo Where to store unmap info for
8320 * iemMemStackPopDoneSpecial.
8321 * @param puNewRsp Where to return the new RSP value. This must be
8322 * assigned to CPUMCTX::rsp manually some time
8323 * after iemMemStackPopDoneSpecial() has been
8324 * called.
8325 */
8326VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8327 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
8328{
8329 Assert(cbMem < UINT8_MAX);
8330 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8331 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8332}
8333
8334
8335/**
8336 * Continue a special stack pop (used by iret and retf), for the purpose of
8337 * retrieving a new stack pointer.
8338 *
8339 * This will raise \#SS or \#PF if appropriate.
8340 *
8341 * @returns Strict VBox status code.
8342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8343 * @param off Offset from the top of the stack. This is zero
8344 * except in the retf case.
8345 * @param cbMem The number of bytes to pop from the stack.
8346 * @param ppvMem Where to return the pointer to the stack memory.
8347 * @param pbUnmapInfo Where to store unmap info for
8348 * iemMemStackPopDoneSpecial.
8349 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8350 * return this because all use of this function is
8351 * to retrieve a new value and anything we return
8352 * here would be discarded.)
8353 */
8354VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8355 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
8356{
8357 Assert(cbMem < UINT8_MAX);
8358
8359 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8360 RTGCPTR GCPtrTop;
8361 if (IEM_IS_64BIT_CODE(pVCpu))
8362 GCPtrTop = uCurNewRsp;
8363 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8364 GCPtrTop = (uint32_t)uCurNewRsp;
8365 else
8366 GCPtrTop = (uint16_t)uCurNewRsp;
8367
8368 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8369 0 /* checked in iemMemStackPopBeginSpecial */);
8370}
8371
8372
8373/**
8374 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8375 * iemMemStackPopContinueSpecial).
8376 *
8377 * The caller will manually commit the rSP.
8378 *
8379 * @returns Strict VBox status code.
8380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8381 * @param bUnmapInfo Unmap information returned by
8382 * iemMemStackPopBeginSpecial() or
8383 * iemMemStackPopContinueSpecial().
8384 */
8385VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
8386{
8387 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8388}
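
/* Illustrative only, not part of the build: the corresponding pop protocol.  Note
   that, unlike the push commit, the caller assigns the new RSP manually after the
   done call, as documented above.

    void const *pvMem;
    uint8_t     bUnmapInfo;
    uint64_t    uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, &pvMem, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        uint64_t const uValue = *(uint64_t const *)pvMem;
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
        if (rcStrict == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
    }
*/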
8389
8390
8391/**
8392 * Fetches a system table byte.
8393 *
8394 * @returns Strict VBox status code.
8395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8396 * @param pbDst Where to return the byte.
8397 * @param iSegReg The index of the segment register to use for
8398 * this access. The base and limits are checked.
8399 * @param GCPtrMem The address of the guest memory.
8400 */
8401VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8402{
8403 /* The lazy approach for now... */
8404 uint8_t bUnmapInfo;
8405 uint8_t const *pbSrc;
8406 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8407 if (rc == VINF_SUCCESS)
8408 {
8409 *pbDst = *pbSrc;
8410 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8411 }
8412 return rc;
8413}
8414
8415
8416/**
8417 * Fetches a system table word.
8418 *
8419 * @returns Strict VBox status code.
8420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8421 * @param pu16Dst Where to return the word.
8422 * @param iSegReg The index of the segment register to use for
8423 * this access. The base and limits are checked.
8424 * @param GCPtrMem The address of the guest memory.
8425 */
8426VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8427{
8428 /* The lazy approach for now... */
8429 uint8_t bUnmapInfo;
8430 uint16_t const *pu16Src;
8431 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8432 if (rc == VINF_SUCCESS)
8433 {
8434 *pu16Dst = *pu16Src;
8435 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8436 }
8437 return rc;
8438}
8439
8440
8441/**
8442 * Fetches a system table dword.
8443 *
8444 * @returns Strict VBox status code.
8445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8446 * @param pu32Dst Where to return the dword.
8447 * @param iSegReg The index of the segment register to use for
8448 * this access. The base and limits are checked.
8449 * @param GCPtrMem The address of the guest memory.
8450 */
8451VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8452{
8453 /* The lazy approach for now... */
8454 uint8_t bUnmapInfo;
8455 uint32_t const *pu32Src;
8456 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8457 if (rc == VINF_SUCCESS)
8458 {
8459 *pu32Dst = *pu32Src;
8460 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8461 }
8462 return rc;
8463}
8464
8465
8466/**
8467 * Fetches a system table qword.
8468 *
8469 * @returns Strict VBox status code.
8470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8471 * @param pu64Dst Where to return the qword.
8472 * @param iSegReg The index of the segment register to use for
8473 * this access. The base and limits are checked.
8474 * @param GCPtrMem The address of the guest memory.
8475 */
8476VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8477{
8478 /* The lazy approach for now... */
8479 uint8_t bUnmapInfo;
8480 uint64_t const *pu64Src;
8481 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8482 if (rc == VINF_SUCCESS)
8483 {
8484 *pu64Dst = *pu64Src;
8485 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8486 }
8487 return rc;
8488}
8489
8490
8491/**
8492 * Fetches a descriptor table entry with caller specified error code.
8493 *
8494 * @returns Strict VBox status code.
8495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8496 * @param pDesc Where to return the descriptor table entry.
8497 * @param uSel The selector which table entry to fetch.
8498 * @param uXcpt The exception to raise on table lookup error.
8499 * @param uErrorCode The error code associated with the exception.
8500 */
8501static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8502 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8503{
8504 AssertPtr(pDesc);
8505 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8506
8507 /** @todo did the 286 require all 8 bytes to be accessible? */
8508 /*
8509 * Get the selector table base and check bounds.
8510 */
8511 RTGCPTR GCPtrBase;
8512 if (uSel & X86_SEL_LDT)
8513 {
8514 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8515 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8516 {
8517 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8518 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8519 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8520 uErrorCode, 0);
8521 }
8522
8523 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8524 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8525 }
8526 else
8527 {
8528 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8529 {
8530 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8531 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8532 uErrorCode, 0);
8533 }
8534 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8535 }
8536
8537 /*
8538 * Read the legacy descriptor and maybe the long mode extensions if
8539 * required.
8540 */
8541 VBOXSTRICTRC rcStrict;
8542 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8543 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8544 else
8545 {
8546 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8547 if (rcStrict == VINF_SUCCESS)
8548 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8549 if (rcStrict == VINF_SUCCESS)
8550 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8551 if (rcStrict == VINF_SUCCESS)
8552 pDesc->Legacy.au16[3] = 0;
8553 else
8554 return rcStrict;
8555 }
8556
8557 if (rcStrict == VINF_SUCCESS)
8558 {
8559 if ( !IEM_IS_LONG_MODE(pVCpu)
8560 || pDesc->Legacy.Gen.u1DescType)
8561 pDesc->Long.au64[1] = 0;
8562 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8563 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8564 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8565 else
8566 {
8567 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8568 /** @todo is this the right exception? */
8569 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8570 }
8571 }
8572 return rcStrict;
8573}
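
/* Worked example (illustrative, assuming the usual X86_SEL_MASK = 0xfff8 and
   X86_SEL_RPL_LDT = 7 definitions): for uSel = 0x2b (GDT, RPL = 3) the bounds check
   above uses (0x2b | X86_SEL_RPL_LDT) = 0x2f against gdtr.cbGdt, and the descriptor
   is read from gdtr.pGdt + (0x2b & X86_SEL_MASK) = gdtr.pGdt + 0x28. */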
8574
8575
8576/**
8577 * Fetches a descriptor table entry.
8578 *
8579 * @returns Strict VBox status code.
8580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8581 * @param pDesc Where to return the descriptor table entry.
8582 * @param uSel The selector which table entry to fetch.
8583 * @param uXcpt The exception to raise on table lookup error.
8584 */
8585VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8586{
8587 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8588}
8589
8590
8591/**
8592 * Marks the selector descriptor as accessed (only non-system descriptors).
8593 *
8594 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8595 * will therefore skip the limit checks.
8596 *
8597 * @returns Strict VBox status code.
8598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8599 * @param uSel The selector.
8600 */
8601VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8602{
8603 /*
8604 * Get the selector table base and calculate the entry address.
8605 */
8606 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8607 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8608 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8609 GCPtr += uSel & X86_SEL_MASK;
8610
8611 /*
8612 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8613 * ugly stuff to avoid this. This will make sure it's an atomic access
8614 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8615 */
8616 VBOXSTRICTRC rcStrict;
8617 uint8_t bUnmapInfo;
8618 uint32_t volatile *pu32;
8619 if ((GCPtr & 3) == 0)
8620 {
8621 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8622 GCPtr += 2 + 2;
8623 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8624 if (rcStrict != VINF_SUCCESS)
8625 return rcStrict;
8626 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8627 }
8628 else
8629 {
8630 /* The misaligned GDT/LDT case, map the whole thing. */
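        /* The accessed flag is bit 40 of the 8 byte descriptor.  The switch below
           advances the base pointer to the next 4 byte aligned address and subtracts
           8 bits per byte skipped so that the very same descriptor bit is targeted. */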
8631 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8632 if (rcStrict != VINF_SUCCESS)
8633 return rcStrict;
8634 switch ((uintptr_t)pu32 & 3)
8635 {
8636 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8637 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8638 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8639 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8640 }
8641 }
8642
8643 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8644}
8645
8646
8647#undef LOG_GROUP
8648#define LOG_GROUP LOG_GROUP_IEM
8649
8650/** @} */
8651
8652/** @name Opcode Helpers.
8653 * @{
8654 */
8655
8656/**
8657 * Calculates the effective address of a ModR/M memory operand.
8658 *
8659 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8660 *
8661 * @return Strict VBox status code.
8662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8663 * @param bRm The ModRM byte.
8664 * @param cbImmAndRspOffset - First byte: The size of any immediate
8665 * following the effective address opcode bytes
8666 * (only for RIP relative addressing).
8667 * - Second byte: RSP displacement (for POP [ESP]).
8668 * @param pGCPtrEff Where to return the effective address.
8669 */
8670VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8671{
8672 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8673# define SET_SS_DEF() \
8674 do \
8675 { \
8676 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8677 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8678 } while (0)
8679
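/* Worked example (illustrative): in 16-bit addressing, bRm = 0x42 decodes to mod = 1,
   rm = 2, i.e. [BP + SI + disp8] with SS as the default segment (SET_SS_DEF), which is
   exactly the case 2 path in the switches below. */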
8680 if (!IEM_IS_64BIT_CODE(pVCpu))
8681 {
8682/** @todo Check the effective address size crap! */
8683 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8684 {
8685 uint16_t u16EffAddr;
8686
8687 /* Handle the disp16 form with no registers first. */
8688 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8689 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8690 else
8691 {
8692 /* Get the displacement. */
8693 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8694 {
8695 case 0: u16EffAddr = 0; break;
8696 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8697 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8698 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8699 }
8700
8701 /* Add the base and index registers to the disp. */
8702 switch (bRm & X86_MODRM_RM_MASK)
8703 {
8704 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8705 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8706 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8707 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8708 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8709 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8710 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8711 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8712 }
8713 }
8714
8715 *pGCPtrEff = u16EffAddr;
8716 }
8717 else
8718 {
8719 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8720 uint32_t u32EffAddr;
8721
8722 /* Handle the disp32 form with no registers first. */
8723 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8724 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8725 else
8726 {
8727 /* Get the register (or SIB) value. */
8728 switch ((bRm & X86_MODRM_RM_MASK))
8729 {
8730 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8731 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8732 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8733 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8734 case 4: /* SIB */
8735 {
8736 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8737
8738 /* Get the index and scale it. */
8739 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8740 {
8741 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8742 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8743 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8744 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8745 case 4: u32EffAddr = 0; /*none */ break;
8746 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8747 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8748 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8749 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8750 }
8751 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8752
8753 /* add base */
8754 switch (bSib & X86_SIB_BASE_MASK)
8755 {
8756 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8757 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8758 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8759 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8760 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8761 case 5:
8762 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8763 {
8764 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8765 SET_SS_DEF();
8766 }
8767 else
8768 {
8769 uint32_t u32Disp;
8770 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8771 u32EffAddr += u32Disp;
8772 }
8773 break;
8774 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8775 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8776 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8777 }
8778 break;
8779 }
8780 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8781 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8782 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8783 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8784 }
8785
8786 /* Get and add the displacement. */
8787 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8788 {
8789 case 0:
8790 break;
8791 case 1:
8792 {
8793 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8794 u32EffAddr += i8Disp;
8795 break;
8796 }
8797 case 2:
8798 {
8799 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8800 u32EffAddr += u32Disp;
8801 break;
8802 }
8803 default:
8804 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8805 }
8806
8807 }
8808 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8809 *pGCPtrEff = u32EffAddr;
8810 }
8811 }
8812 else
8813 {
8814 uint64_t u64EffAddr;
8815
8816 /* Handle the rip+disp32 form with no registers first. */
8817 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8818 {
8819 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8820 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8821 }
8822 else
8823 {
8824 /* Get the register (or SIB) value. */
8825 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8826 {
8827 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8828 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8829 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8830 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8831 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8832 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8833 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8834 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8835 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8836 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8837 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8838 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8839 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8840 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8841 /* SIB */
8842 case 4:
8843 case 12:
8844 {
8845 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8846
8847 /* Get the index and scale it. */
8848 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8849 {
8850 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8851 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8852 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8853 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8854 case 4: u64EffAddr = 0; /*none */ break;
8855 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8856 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8857 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8858 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8859 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8860 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8861 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8862 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8863 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8864 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8865 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8866 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8867 }
8868 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8869
8870 /* add base */
8871 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8872 {
8873 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8874 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8875 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8876 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8877 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8878 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8879 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8880 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8881 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8882 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8883 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8884 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8885 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8886 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8887 /* complicated encodings */
8888 case 5:
8889 case 13:
8890 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8891 {
8892 if (!pVCpu->iem.s.uRexB)
8893 {
8894 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8895 SET_SS_DEF();
8896 }
8897 else
8898 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8899 }
8900 else
8901 {
8902 uint32_t u32Disp;
8903 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8904 u64EffAddr += (int32_t)u32Disp;
8905 }
8906 break;
8907 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8908 }
8909 break;
8910 }
8911 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8912 }
8913
8914 /* Get and add the displacement. */
8915 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8916 {
8917 case 0:
8918 break;
8919 case 1:
8920 {
8921 int8_t i8Disp;
8922 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8923 u64EffAddr += i8Disp;
8924 break;
8925 }
8926 case 2:
8927 {
8928 uint32_t u32Disp;
8929 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8930 u64EffAddr += (int32_t)u32Disp;
8931 break;
8932 }
8933 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8934 }
8935
8936 }
8937
8938 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8939 *pGCPtrEff = u64EffAddr;
8940 else
8941 {
8942 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8943 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8944 }
8945 }
8946
8947 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8948 return VINF_SUCCESS;
8949}
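/*
 * For reference, a minimal standalone sketch of the 16-bit ModR/M base/index
 * table handled by iemOpHlpCalcRmEffAddr above. The helper name and the tiny
 * register structure are made up for illustration and are not part of the IEM
 * API; the real code reads the registers from pVCpu->cpum.GstCtx instead.
 */
#if 0 /* illustrative only, not compiled */
typedef struct EXAMPLEREGS16
{
    uint16_t bx, bp, si, di;
} EXAMPLEREGS16;

/* Returns the 16-bit effective address for a mod != 3 encoding, given the
   already decoded displacement (zero, sign-extended disp8, or disp16). The
   mod=0/rm=6 disp16-only case is assumed to be handled before calling this,
   just as it is in the real code. 16-bit wrap-around is intentional. */
static uint16_t exampleCalc16BitEffAddr(uint8_t bRm, uint16_t u16Disp, EXAMPLEREGS16 const *pRegs)
{
    uint16_t u16EffAddr = u16Disp;
    switch (bRm & 7 /* X86_MODRM_RM_MASK */)
    {
        case 0: u16EffAddr += pRegs->bx + pRegs->si; break; /* [BX+SI] */
        case 1: u16EffAddr += pRegs->bx + pRegs->di; break; /* [BX+DI] */
        case 2: u16EffAddr += pRegs->bp + pRegs->si; break; /* [BP+SI], SS is the default segment */
        case 3: u16EffAddr += pRegs->bp + pRegs->di; break; /* [BP+DI], SS is the default segment */
        case 4: u16EffAddr += pRegs->si;             break; /* [SI] */
        case 5: u16EffAddr += pRegs->di;             break; /* [DI] */
        case 6: u16EffAddr += pRegs->bp;             break; /* [BP],    SS is the default segment */
        case 7: u16EffAddr += pRegs->bx;             break; /* [BX] */
    }
    return u16EffAddr;
}
#endif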
8950
8951
8952#ifdef IEM_WITH_SETJMP
8953/**
8954 * Calculates the effective address of a ModR/M memory operand.
8955 *
8956 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8957 *
8958 * May longjmp on internal error.
8959 *
8960 * @return The effective address.
8961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8962 * @param bRm The ModRM byte.
8963 * @param cbImmAndRspOffset - First byte: The size of any immediate
8964 * following the effective address opcode bytes
8965 * (only for RIP relative addressing).
8966 * - Second byte: RSP displacement (for POP [ESP]).
8967 */
8968RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8969{
8970 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8971# define SET_SS_DEF() \
8972 do \
8973 { \
8974 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8975 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8976 } while (0)
8977
8978 if (!IEM_IS_64BIT_CODE(pVCpu))
8979 {
8980/** @todo Check the effective address size crap! */
8981 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8982 {
8983 uint16_t u16EffAddr;
8984
8985 /* Handle the disp16 form with no registers first. */
8986 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8987 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8988 else
8989 {
8990 /* Get the displacement. */
8991 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8992 {
8993 case 0: u16EffAddr = 0; break;
8994 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8995 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8996 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8997 }
8998
8999 /* Add the base and index registers to the disp. */
9000 switch (bRm & X86_MODRM_RM_MASK)
9001 {
9002 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9003 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9004 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9005 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9006 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9007 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9008 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9009 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9010 }
9011 }
9012
9013 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9014 return u16EffAddr;
9015 }
9016
9017 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9018 uint32_t u32EffAddr;
9019
9020 /* Handle the disp32 form with no registers first. */
9021 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9022 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9023 else
9024 {
9025 /* Get the register (or SIB) value. */
9026 switch ((bRm & X86_MODRM_RM_MASK))
9027 {
9028 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9029 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9030 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9031 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9032 case 4: /* SIB */
9033 {
9034 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9035
9036 /* Get the index and scale it. */
9037 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9038 {
9039 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9040 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9041 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9042 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9043 case 4: u32EffAddr = 0; /*none */ break;
9044 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9045 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9046 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9047 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9048 }
9049 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9050
9051 /* add base */
9052 switch (bSib & X86_SIB_BASE_MASK)
9053 {
9054 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9055 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9056 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9057 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9058 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9059 case 5:
9060 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9061 {
9062 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9063 SET_SS_DEF();
9064 }
9065 else
9066 {
9067 uint32_t u32Disp;
9068 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9069 u32EffAddr += u32Disp;
9070 }
9071 break;
9072 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9073 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9074 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9075 }
9076 break;
9077 }
9078 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9079 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9080 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9081 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9082 }
9083
9084 /* Get and add the displacement. */
9085 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9086 {
9087 case 0:
9088 break;
9089 case 1:
9090 {
9091 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9092 u32EffAddr += i8Disp;
9093 break;
9094 }
9095 case 2:
9096 {
9097 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9098 u32EffAddr += u32Disp;
9099 break;
9100 }
9101 default:
9102 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9103 }
9104 }
9105
9106 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9107 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9108 return u32EffAddr;
9109 }
9110
9111 uint64_t u64EffAddr;
9112
9113 /* Handle the rip+disp32 form with no registers first. */
9114 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9115 {
9116 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9117 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9118 }
9119 else
9120 {
9121 /* Get the register (or SIB) value. */
9122 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9123 {
9124 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9125 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9126 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9127 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9128 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9129 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9130 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9131 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9132 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9133 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9134 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9135 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9136 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9137 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9138 /* SIB */
9139 case 4:
9140 case 12:
9141 {
9142 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9143
9144 /* Get the index and scale it. */
9145 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9146 {
9147 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9148 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9149 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9150 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9151 case 4: u64EffAddr = 0; /*none */ break;
9152 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9153 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9154 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9155 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9156 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9157 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9158 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9159 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9160 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9161 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9162 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9163 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9164 }
9165 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9166
9167 /* add base */
9168 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9169 {
9170 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9171 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9172 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9173 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9174 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9175 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9176 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9177 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9178 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9179 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9180 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9181 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9182 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9183 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9184 /* complicated encodings */
9185 case 5:
9186 case 13:
9187 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9188 {
9189 if (!pVCpu->iem.s.uRexB)
9190 {
9191 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9192 SET_SS_DEF();
9193 }
9194 else
9195 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9196 }
9197 else
9198 {
9199 uint32_t u32Disp;
9200 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9201 u64EffAddr += (int32_t)u32Disp;
9202 }
9203 break;
9204 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9205 }
9206 break;
9207 }
9208 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9209 }
9210
9211 /* Get and add the displacement. */
9212 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9213 {
9214 case 0:
9215 break;
9216 case 1:
9217 {
9218 int8_t i8Disp;
9219 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9220 u64EffAddr += i8Disp;
9221 break;
9222 }
9223 case 2:
9224 {
9225 uint32_t u32Disp;
9226 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9227 u64EffAddr += (int32_t)u32Disp;
9228 break;
9229 }
9230 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9231 }
9232
9233 }
9234
9235 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9236 {
9237 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9238 return u64EffAddr;
9239 }
9240 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9241 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9242 return u64EffAddr & UINT32_MAX;
9243}
9244#endif /* IEM_WITH_SETJMP */
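/*
 * The rip+disp32 form handled above is worth restating in isolation: the
 * displacement is relative to the address of the *next* instruction, but when
 * the effective address is calculated any trailing immediate has not been
 * fetched yet, so its size is passed in via the low byte of cbImmAndRspOffset.
 * A sketch with plain integer inputs (the helper name is made up; the inputs
 * come from the decoder state in the real code):
 */
#if 0 /* illustrative only, not compiled */
static uint64_t exampleCalcRipRelative(uint64_t uRipAtInstrStart, /* RIP of the instruction being decoded */
                                       uint8_t  cbDecodedSoFar,   /* opcode bytes consumed incl. the disp32 */
                                       uint8_t  cbImm,            /* size of any trailing immediate */
                                       int32_t  i32Disp)          /* the sign-extended disp32 */
{
    /* cbDecodedSoFar + cbImm equals the full instruction length, so this is the
       same as "RIP of the following instruction + sign-extended disp32". */
    return uRipAtInstrStart + cbDecodedSoFar + cbImm + (int64_t)i32Disp;
}
#endif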
9245
9246
9247/**
9248 * Calculates the effective address of a ModR/M memory operand, extended version
9249 * for use in the recompilers.
9250 *
9251 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9252 *
9253 * @return Strict VBox status code.
9254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9255 * @param bRm The ModRM byte.
9256 * @param cbImmAndRspOffset - First byte: The size of any immediate
9257 * following the effective address opcode bytes
9258 * (only for RIP relative addressing).
9259 * - Second byte: RSP displacement (for POP [ESP]).
9260 * @param pGCPtrEff Where to return the effective address.
9261 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9262 * SIB byte (bits 39:32).
9263 */
9264VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9265{
9266 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9267# define SET_SS_DEF() \
9268 do \
9269 { \
9270 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9271 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9272 } while (0)
9273
9274 uint64_t uInfo;
9275 if (!IEM_IS_64BIT_CODE(pVCpu))
9276 {
9277/** @todo Check the effective address size crap! */
9278 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9279 {
9280 uint16_t u16EffAddr;
9281
9282 /* Handle the disp16 form with no registers first. */
9283 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9284 {
9285 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9286 uInfo = u16EffAddr;
9287 }
9288 else
9289 {
9290 /* Get the displacement. */
9291 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9292 {
9293 case 0: u16EffAddr = 0; break;
9294 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9295 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9296 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9297 }
9298 uInfo = u16EffAddr;
9299
9300 /* Add the base and index registers to the disp. */
9301 switch (bRm & X86_MODRM_RM_MASK)
9302 {
9303 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9304 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9305 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9306 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9307 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9308 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9309 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9310 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9311 }
9312 }
9313
9314 *pGCPtrEff = u16EffAddr;
9315 }
9316 else
9317 {
9318 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9319 uint32_t u32EffAddr;
9320
9321 /* Handle the disp32 form with no registers first. */
9322 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9323 {
9324 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9325 uInfo = u32EffAddr;
9326 }
9327 else
9328 {
9329 /* Get the register (or SIB) value. */
9330 uInfo = 0;
9331 switch ((bRm & X86_MODRM_RM_MASK))
9332 {
9333 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9334 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9335 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9336 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9337 case 4: /* SIB */
9338 {
9339 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9340 uInfo = (uint64_t)bSib << 32;
9341
9342 /* Get the index and scale it. */
9343 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9344 {
9345 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9346 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9347 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9348 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9349 case 4: u32EffAddr = 0; /*none */ break;
9350 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9351 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9352 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9353 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9354 }
9355 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9356
9357 /* add base */
9358 switch (bSib & X86_SIB_BASE_MASK)
9359 {
9360 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9361 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9362 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9363 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9364 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9365 case 5:
9366 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9367 {
9368 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9369 SET_SS_DEF();
9370 }
9371 else
9372 {
9373 uint32_t u32Disp;
9374 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9375 u32EffAddr += u32Disp;
9376 uInfo |= u32Disp;
9377 }
9378 break;
9379 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9380 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9381 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9382 }
9383 break;
9384 }
9385 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9386 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9387 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9388 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9389 }
9390
9391 /* Get and add the displacement. */
9392 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9393 {
9394 case 0:
9395 break;
9396 case 1:
9397 {
9398 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9399 u32EffAddr += i8Disp;
9400 uInfo |= (uint32_t)(int32_t)i8Disp;
9401 break;
9402 }
9403 case 2:
9404 {
9405 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9406 u32EffAddr += u32Disp;
9407 uInfo |= (uint32_t)u32Disp;
9408 break;
9409 }
9410 default:
9411 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9412 }
9413
9414 }
9415 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9416 *pGCPtrEff = u32EffAddr;
9417 }
9418 }
9419 else
9420 {
9421 uint64_t u64EffAddr;
9422
9423 /* Handle the rip+disp32 form with no registers first. */
9424 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9425 {
9426 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9427 uInfo = (uint32_t)u64EffAddr;
9428 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9429 }
9430 else
9431 {
9432 /* Get the register (or SIB) value. */
9433 uInfo = 0;
9434 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9435 {
9436 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9437 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9438 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9439 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9440 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9441 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9442 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9443 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9444 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9445 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9446 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9447 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9448 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9449 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9450 /* SIB */
9451 case 4:
9452 case 12:
9453 {
9454 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9455 uInfo = (uint64_t)bSib << 32;
9456
9457 /* Get the index and scale it. */
9458 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9459 {
9460 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9461 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9462 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9463 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9464 case 4: u64EffAddr = 0; /*none */ break;
9465 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9466 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9467 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9468 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9469 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9470 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9471 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9472 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9473 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9474 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9475 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9476 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9477 }
9478 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9479
9480 /* add base */
9481 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9482 {
9483 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9484 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9485 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9486 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9487 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9488 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9489 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9490 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9491 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9492 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9493 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9494 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9495 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9496 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9497 /* complicated encodings */
9498 case 5:
9499 case 13:
9500 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9501 {
9502 if (!pVCpu->iem.s.uRexB)
9503 {
9504 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9505 SET_SS_DEF();
9506 }
9507 else
9508 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9509 }
9510 else
9511 {
9512 uint32_t u32Disp;
9513 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9514 u64EffAddr += (int32_t)u32Disp;
9515 uInfo |= u32Disp;
9516 }
9517 break;
9518 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9519 }
9520 break;
9521 }
9522 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9523 }
9524
9525 /* Get and add the displacement. */
9526 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9527 {
9528 case 0:
9529 break;
9530 case 1:
9531 {
9532 int8_t i8Disp;
9533 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9534 u64EffAddr += i8Disp;
9535 uInfo |= (uint32_t)(int32_t)i8Disp;
9536 break;
9537 }
9538 case 2:
9539 {
9540 uint32_t u32Disp;
9541 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9542 u64EffAddr += (int32_t)u32Disp;
9543 uInfo |= u32Disp;
9544 break;
9545 }
9546 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9547 }
9548
9549 }
9550
9551 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9552 *pGCPtrEff = u64EffAddr;
9553 else
9554 {
9555 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9556 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9557 }
9558 }
9559 *puInfo = uInfo;
9560
9561 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9562 return VINF_SUCCESS;
9563}
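/*
 * A sketch of how a caller might unpack the uInfo value produced by
 * iemOpHlpCalcRmEffAddrEx above (displacement in bits 31:0, SIB byte in bits
 * 39:32). The surrounding caller code is hypothetical.
 */
#if 0 /* illustrative only, not compiled */
static void exampleUseCalcRmEffAddrExInfo(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset)
{
    RTGCPTR      GCPtrEff;
    uint64_t     uInfo;
    VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
    if (rcStrict == VINF_SUCCESS)
    {
        uint32_t const u32Disp = (uint32_t)uInfo;        /* bits 31:0  - the displacement, if any */
        uint8_t  const bSib    = (uint8_t)(uInfo >> 32); /* bits 39:32 - the SIB byte, if any */
        Log5(("EffAddr=%RGv disp=%#x sib=%#x\n", GCPtrEff, u32Disp, bSib));
    }
}
#endif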
9564
9565/** @} */
9566
9567
9568#ifdef LOG_ENABLED
9569/**
9570 * Logs the current instruction.
9571 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9572 * @param fSameCtx Set if we have the same context information as the VMM,
9573 * clear if we may have already executed an instruction in
9574 * our debug context. When clear, we assume IEMCPU holds
9575 * valid CPU mode info.
9576 *
9577 * The @a fSameCtx parameter is now misleading and obsolete.
9578 * @param pszFunction The IEM function doing the execution.
9579 */
9580static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9581{
9582# ifdef IN_RING3
9583 if (LogIs2Enabled())
9584 {
9585 char szInstr[256];
9586 uint32_t cbInstr = 0;
9587 if (fSameCtx)
9588 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9589 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9590 szInstr, sizeof(szInstr), &cbInstr);
9591 else
9592 {
9593 uint32_t fFlags = 0;
9594 switch (IEM_GET_CPU_MODE(pVCpu))
9595 {
9596 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9597 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9598 case IEMMODE_16BIT:
9599 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9600 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9601 else
9602 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9603 break;
9604 }
9605 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9606 szInstr, sizeof(szInstr), &cbInstr);
9607 }
9608
9609 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9610 Log2(("**** %s fExec=%x\n"
9611 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9612 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9613 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9614 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9615 " %s\n"
9616 , pszFunction, pVCpu->iem.s.fExec,
9617 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9618 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9619 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9620 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9621 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9622 szInstr));
9623
9624 /* This stuff sucks atm. as it fills the log with MSRs. */
9625 //if (LogIs3Enabled())
9626 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9627 }
9628 else
9629# endif
9630 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9631 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9632 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9633}
9634#endif /* LOG_ENABLED */
9635
9636
9637#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9638/**
9639 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9640 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9641 *
9642 * @returns Modified rcStrict.
9643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9644 * @param rcStrict The instruction execution status.
9645 */
9646static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9647{
9648 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9649 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9650 {
9651 /* VMX preemption timer takes priority over NMI-window exits. */
9652 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9653 {
9654 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9655 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9656 }
9657 /*
9658 * Check remaining intercepts.
9659 *
9660 * NMI-window and Interrupt-window VM-exits.
9661 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9662 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9663 *
9664 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9665 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9666 */
9667 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9668 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9669 && !TRPMHasTrap(pVCpu))
9670 {
9671 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9672 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9673 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9674 {
9675 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9676 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9677 }
9678 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9679 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9680 {
9681 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9682 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9683 }
9684 }
9685 }
9686 /* TPR-below threshold/APIC write has the highest priority. */
9687 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9688 {
9689 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9690 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9691 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9692 }
9693 /* MTF takes priority over VMX-preemption timer. */
9694 else
9695 {
9696 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9697 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9698 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9699 }
9700 return rcStrict;
9701}
9702#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9703
9704
9705/**
9706 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9707 * IEMExecOneWithPrefetchedByPC.
9708 *
9709 * Similar code is found in IEMExecLots.
9710 *
9711 * @return Strict VBox status code.
9712 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9713 * @param fExecuteInhibit If set, execute the instruction following CLI,
9714 * POP SS and MOV SS,GR.
9715 * @param pszFunction The calling function name.
9716 */
9717DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9718{
9719 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9720 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9721 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9722 RT_NOREF_PV(pszFunction);
9723
9724#ifdef IEM_WITH_SETJMP
9725 VBOXSTRICTRC rcStrict;
9726 IEM_TRY_SETJMP(pVCpu, rcStrict)
9727 {
9728 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9729 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9730 }
9731 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9732 {
9733 pVCpu->iem.s.cLongJumps++;
9734 }
9735 IEM_CATCH_LONGJMP_END(pVCpu);
9736#else
9737 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9738 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9739#endif
9740 if (rcStrict == VINF_SUCCESS)
9741 pVCpu->iem.s.cInstructions++;
9742 if (pVCpu->iem.s.cActiveMappings > 0)
9743 {
9744 Assert(rcStrict != VINF_SUCCESS);
9745 iemMemRollback(pVCpu);
9746 }
9747 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9748 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9749 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9750
9751//#ifdef DEBUG
9752// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9753//#endif
9754
9755#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9756 /*
9757 * Perform any VMX nested-guest instruction boundary actions.
9758 *
9759 * If any of these causes a VM-exit, we must skip executing the next
9760 * instruction (would run into stale page tables). A VM-exit makes sure
9761 * there is no interrupt-inhibition, so that should ensure we don't go
9762 * to try execute the next instruction. Clearing fExecuteInhibit is
9763 * problematic because of the setjmp/longjmp clobbering above.
9764 */
9765 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9766 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9767 || rcStrict != VINF_SUCCESS)
9768 { /* likely */ }
9769 else
9770 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9771#endif
9772
9773 /* Execute the next instruction as well if a cli, pop ss or
9774 mov ss, Gr has just completed successfully. */
9775 if ( fExecuteInhibit
9776 && rcStrict == VINF_SUCCESS
9777 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9778 {
9779 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9780 if (rcStrict == VINF_SUCCESS)
9781 {
9782#ifdef LOG_ENABLED
9783 iemLogCurInstr(pVCpu, false, pszFunction);
9784#endif
9785#ifdef IEM_WITH_SETJMP
9786 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9787 {
9788 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9789 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9790 }
9791 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9792 {
9793 pVCpu->iem.s.cLongJumps++;
9794 }
9795 IEM_CATCH_LONGJMP_END(pVCpu);
9796#else
9797 IEM_OPCODE_GET_FIRST_U8(&b);
9798 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9799#endif
9800 if (rcStrict == VINF_SUCCESS)
9801 {
9802 pVCpu->iem.s.cInstructions++;
9803#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9804 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9805 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9806 { /* likely */ }
9807 else
9808 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9809#endif
9810 }
9811 if (pVCpu->iem.s.cActiveMappings > 0)
9812 {
9813 Assert(rcStrict != VINF_SUCCESS);
9814 iemMemRollback(pVCpu);
9815 }
9816 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9817 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9818 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9819 }
9820 else if (pVCpu->iem.s.cActiveMappings > 0)
9821 iemMemRollback(pVCpu);
9822 /** @todo drop this after we bake this change into RIP advancing. */
9823 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9824 }
9825
9826 /*
9827 * Return value fiddling, statistics and sanity assertions.
9828 */
9829 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9830
9831 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9832 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9833 return rcStrict;
9834}
9835
9836
9837/**
9838 * Execute one instruction.
9839 *
9840 * @return Strict VBox status code.
9841 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9842 */
9843VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9844{
9845 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9846#ifdef LOG_ENABLED
9847 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9848#endif
9849
9850 /*
9851 * Do the decoding and emulation.
9852 */
9853 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9854 if (rcStrict == VINF_SUCCESS)
9855 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9856 else if (pVCpu->iem.s.cActiveMappings > 0)
9857 iemMemRollback(pVCpu);
9858
9859 if (rcStrict != VINF_SUCCESS)
9860 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9861 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9862 return rcStrict;
9863}
9864
9865
9866VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9867{
9868 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9869 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9870 if (rcStrict == VINF_SUCCESS)
9871 {
9872 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9873 if (pcbWritten)
9874 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9875 }
9876 else if (pVCpu->iem.s.cActiveMappings > 0)
9877 iemMemRollback(pVCpu);
9878
9879 return rcStrict;
9880}
9881
9882
9883VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9884 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9885{
9886 VBOXSTRICTRC rcStrict;
9887 if ( cbOpcodeBytes
9888 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9889 {
9890 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9891#ifdef IEM_WITH_CODE_TLB
9892 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9893 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9894 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9895 pVCpu->iem.s.offCurInstrStart = 0;
9896 pVCpu->iem.s.offInstrNextByte = 0;
9897 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9898#else
9899 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9900 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9901#endif
9902 rcStrict = VINF_SUCCESS;
9903 }
9904 else
9905 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9906 if (rcStrict == VINF_SUCCESS)
9907 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9908 else if (pVCpu->iem.s.cActiveMappings > 0)
9909 iemMemRollback(pVCpu);
9910
9911 return rcStrict;
9912}
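/*
 * A hypothetical caller of IEMExecOneWithPrefetchedByPC above. Where the
 * opcode bytes come from is an assumption of this sketch; the bytes are only
 * used when OpcodeBytesPC matches the current RIP, otherwise the call falls
 * back to normal opcode prefetching.
 */
#if 0 /* illustrative only, not compiled */
static VBOXSTRICTRC exampleExecOnePrefetched(PVMCPUCC pVCpu, uint8_t const *pbBytes, size_t cbBytes)
{
    /* Pass the current RIP so the prefetched bytes are actually used. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip, pbBytes, cbBytes);
}
#endif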
9913
9914
9915VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9916{
9917 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9918 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9919 if (rcStrict == VINF_SUCCESS)
9920 {
9921 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9922 if (pcbWritten)
9923 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9924 }
9925 else if (pVCpu->iem.s.cActiveMappings > 0)
9926 iemMemRollback(pVCpu);
9927
9928 return rcStrict;
9929}
9930
9931
9932VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9933 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9934{
9935 VBOXSTRICTRC rcStrict;
9936 if ( cbOpcodeBytes
9937 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9938 {
9939 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9940#ifdef IEM_WITH_CODE_TLB
9941 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9942 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9943 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9944 pVCpu->iem.s.offCurInstrStart = 0;
9945 pVCpu->iem.s.offInstrNextByte = 0;
9946 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9947#else
9948 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9949 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9950#endif
9951 rcStrict = VINF_SUCCESS;
9952 }
9953 else
9954 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9955 if (rcStrict == VINF_SUCCESS)
9956 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9957 else if (pVCpu->iem.s.cActiveMappings > 0)
9958 iemMemRollback(pVCpu);
9959
9960 return rcStrict;
9961}
9962
9963
9964/**
9965 * For handling split cacheline lock operations when the host has split-lock
9966 * detection enabled.
9967 *
9968 * This will cause the interpreter to disregard the lock prefix and implicit
9969 * locking (xchg).
9970 *
9971 * @returns Strict VBox status code.
9972 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9973 */
9974VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9975{
9976 /*
9977 * Do the decoding and emulation.
9978 */
9979 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9980 if (rcStrict == VINF_SUCCESS)
9981 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9982 else if (pVCpu->iem.s.cActiveMappings > 0)
9983 iemMemRollback(pVCpu);
9984
9985 if (rcStrict != VINF_SUCCESS)
9986 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9987 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9988 return rcStrict;
9989}
9990
9991
9992/**
9993 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9994 * inject a pending TRPM trap.
9995 */
9996VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9997{
9998 Assert(TRPMHasTrap(pVCpu));
9999
10000 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10001 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10002 {
10003 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10004#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10005 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10006 if (fIntrEnabled)
10007 {
10008 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10009 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10010 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10011 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10012 else
10013 {
10014 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10015 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10016 }
10017 }
10018#else
10019 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10020#endif
10021 if (fIntrEnabled)
10022 {
10023 uint8_t u8TrapNo;
10024 TRPMEVENT enmType;
10025 uint32_t uErrCode;
10026 RTGCPTR uCr2;
10027 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10028 AssertRC(rc2);
10029 Assert(enmType == TRPM_HARDWARE_INT);
10030 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10031
10032 TRPMResetTrap(pVCpu);
10033
10034#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10035 /* Injecting an event may cause a VM-exit. */
10036 if ( rcStrict != VINF_SUCCESS
10037 && rcStrict != VINF_IEM_RAISED_XCPT)
10038 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10039#else
10040 NOREF(rcStrict);
10041#endif
10042 }
10043 }
10044
10045 return VINF_SUCCESS;
10046}
10047
10048
10049VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10050{
10051 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10052 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10053 Assert(cMaxInstructions > 0);
10054
10055 /*
10056 * See if there is an interrupt pending in TRPM, inject it if we can.
10057 */
10058 /** @todo What if we are injecting an exception and not an interrupt? Is that
10059 * possible here? For now we assert it is indeed only an interrupt. */
10060 if (!TRPMHasTrap(pVCpu))
10061 { /* likely */ }
10062 else
10063 {
10064 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
10065 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10066 { /*likely */ }
10067 else
10068 return rcStrict;
10069 }
10070
10071 /*
10072 * Initial decoder init w/ prefetch, then setup setjmp.
10073 */
10074 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10075 if (rcStrict == VINF_SUCCESS)
10076 {
10077#ifdef IEM_WITH_SETJMP
10078 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10079 IEM_TRY_SETJMP(pVCpu, rcStrict)
10080#endif
10081 {
10082 /*
10083 * The run loop. We limit ourselves to the instruction count specified by the caller (cMaxInstructions).
10084 */
10085 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10086 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10087 for (;;)
10088 {
10089 /*
10090 * Log the state.
10091 */
10092#ifdef LOG_ENABLED
10093 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10094#endif
10095
10096 /*
10097 * Do the decoding and emulation.
10098 */
10099 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10100 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10101#ifdef VBOX_STRICT
10102 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10103#endif
10104 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10105 {
10106 Assert(pVCpu->iem.s.cActiveMappings == 0);
10107 pVCpu->iem.s.cInstructions++;
10108
10109#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10110 /* Perform any VMX nested-guest instruction boundary actions. */
10111 uint64_t fCpu = pVCpu->fLocalForcedActions;
10112 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10113 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10114 { /* likely */ }
10115 else
10116 {
10117 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10118 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10119 fCpu = pVCpu->fLocalForcedActions;
10120 else
10121 {
10122 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10123 break;
10124 }
10125 }
10126#endif
10127 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10128 {
10129#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10130 uint64_t fCpu = pVCpu->fLocalForcedActions;
10131#endif
10132 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10133 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10134 | VMCPU_FF_TLB_FLUSH
10135 | VMCPU_FF_UNHALT );
10136
10137 if (RT_LIKELY( ( !fCpu
10138 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10139 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10140 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10141 {
10142 if (--cMaxInstructionsGccStupidity > 0)
10143 {
10144 /* Poll timers every now and then according to the caller's specs. */
10145 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10146 || !TMTimerPollBool(pVM, pVCpu))
10147 {
10148 Assert(pVCpu->iem.s.cActiveMappings == 0);
10149 iemReInitDecoder(pVCpu);
10150 continue;
10151 }
10152 }
10153 }
10154 }
10155 Assert(pVCpu->iem.s.cActiveMappings == 0);
10156 }
10157 else if (pVCpu->iem.s.cActiveMappings > 0)
10158 iemMemRollback(pVCpu);
10159 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10160 break;
10161 }
10162 }
10163#ifdef IEM_WITH_SETJMP
10164 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10165 {
10166 if (pVCpu->iem.s.cActiveMappings > 0)
10167 iemMemRollback(pVCpu);
10168# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10169 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10170# endif
10171 pVCpu->iem.s.cLongJumps++;
10172 }
10173 IEM_CATCH_LONGJMP_END(pVCpu);
10174#endif
10175
10176 /*
10177 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10178 */
10179 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10180 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10181 }
10182 else
10183 {
10184 if (pVCpu->iem.s.cActiveMappings > 0)
10185 iemMemRollback(pVCpu);
10186
10187#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10188 /*
10189 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10190 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10191 */
10192 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10193#endif
10194 }
10195
10196 /*
10197 * Maybe re-enter raw-mode and log.
10198 */
10199 if (rcStrict != VINF_SUCCESS)
10200 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10201 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10202 if (pcInstructions)
10203 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10204 return rcStrict;
10205}
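/*
 * A hypothetical caller of IEMExecLots above. Note that cPollRate is a mask
 * (a power of two minus one, as the assertion at the top of the function
 * requires), so timers get polled roughly every cPollRate + 1 instructions.
 */
#if 0 /* illustrative only, not compiled */
static VBOXSTRICTRC exampleExecLots(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    /* Up to 4096 instructions, polling timers about every 32 instructions. */
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 31 /*cPollRate*/, &cInstructions);
    LogFlow(("exampleExecLots: executed %u instruction(s), rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif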
10206
10207
10208/**
10209 * Interface used by EMExecuteExec, does exit statistics and limits.
10210 *
10211 * @returns Strict VBox status code.
10212 * @param pVCpu The cross context virtual CPU structure.
10213 * @param fWillExit To be defined.
10214 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10215 * @param cMaxInstructions Maximum number of instructions to execute.
10216 * @param cMaxInstructionsWithoutExits
10217 * The max number of instructions without exits.
10218 * @param pStats Where to return statistics.
10219 */
10220VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10221 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10222{
10223 NOREF(fWillExit); /** @todo define flexible exit crits */
10224
10225 /*
10226 * Initialize return stats.
10227 */
10228 pStats->cInstructions = 0;
10229 pStats->cExits = 0;
10230 pStats->cMaxExitDistance = 0;
10231 pStats->cReserved = 0;
10232
10233 /*
10234 * Initial decoder init w/ prefetch, then setup setjmp.
10235 */
10236 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10237 if (rcStrict == VINF_SUCCESS)
10238 {
10239#ifdef IEM_WITH_SETJMP
10240 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10241 IEM_TRY_SETJMP(pVCpu, rcStrict)
10242#endif
10243 {
10244#ifdef IN_RING0
10245 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10246#endif
10247 uint32_t cInstructionSinceLastExit = 0;
10248
10249 /*
10250 * The run loop. We limit ourselves to the instruction count specified by the caller (cMaxInstructions).
10251 */
10252 PVM pVM = pVCpu->CTX_SUFF(pVM);
10253 for (;;)
10254 {
10255 /*
10256 * Log the state.
10257 */
10258#ifdef LOG_ENABLED
10259 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10260#endif
10261
10262 /*
10263 * Do the decoding and emulation.
10264 */
10265 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10266
10267 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10268 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10269
10270 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10271 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10272 {
10273 pStats->cExits += 1;
10274 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10275 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10276 cInstructionSinceLastExit = 0;
10277 }
10278
10279 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10280 {
10281 Assert(pVCpu->iem.s.cActiveMappings == 0);
10282 pVCpu->iem.s.cInstructions++;
10283 pStats->cInstructions++;
10284 cInstructionSinceLastExit++;
10285
10286#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10287 /* Perform any VMX nested-guest instruction boundary actions. */
10288 uint64_t fCpu = pVCpu->fLocalForcedActions;
10289 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10290 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10291 { /* likely */ }
10292 else
10293 {
10294 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10295 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10296 fCpu = pVCpu->fLocalForcedActions;
10297 else
10298 {
10299 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10300 break;
10301 }
10302 }
10303#endif
10304 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10305 {
10306#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10307 uint64_t fCpu = pVCpu->fLocalForcedActions;
10308#endif
10309 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10310 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10311 | VMCPU_FF_TLB_FLUSH
10312 | VMCPU_FF_UNHALT );
10313 if (RT_LIKELY( ( ( !fCpu
10314 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10315 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10316 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10317 || pStats->cInstructions < cMinInstructions))
10318 {
10319 if (pStats->cInstructions < cMaxInstructions)
10320 {
10321 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10322 {
10323#ifdef IN_RING0
10324 if ( !fCheckPreemptionPending
10325 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10326#endif
10327 {
10328 Assert(pVCpu->iem.s.cActiveMappings == 0);
10329 iemReInitDecoder(pVCpu);
10330 continue;
10331 }
10332#ifdef IN_RING0
10333 rcStrict = VINF_EM_RAW_INTERRUPT;
10334 break;
10335#endif
10336 }
10337 }
10338 }
10339 Assert(!(fCpu & VMCPU_FF_IEM));
10340 }
10341 Assert(pVCpu->iem.s.cActiveMappings == 0);
10342 }
10343 else if (pVCpu->iem.s.cActiveMappings > 0)
10344 iemMemRollback(pVCpu);
10345 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10346 break;
10347 }
10348 }
10349#ifdef IEM_WITH_SETJMP
10350 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10351 {
10352 if (pVCpu->iem.s.cActiveMappings > 0)
10353 iemMemRollback(pVCpu);
10354 pVCpu->iem.s.cLongJumps++;
10355 }
10356 IEM_CATCH_LONGJMP_END(pVCpu);
10357#endif
10358
10359 /*
10360 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10361 */
10362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10364 }
10365 else
10366 {
10367 if (pVCpu->iem.s.cActiveMappings > 0)
10368 iemMemRollback(pVCpu);
10369
10370#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10371 /*
10372 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10373 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10374 */
10375 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10376#endif
10377 }
10378
10379 /*
10380 * Maybe re-enter raw-mode and log.
10381 */
10382 if (rcStrict != VINF_SUCCESS)
10383 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10384 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10385 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10386 return rcStrict;
10387}
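/*
 * Illustrative sketch (not part of the original source): one way an outer
 * execution loop could drive IEMExecForExits.  The helper name and the
 * concrete instruction limits below are assumptions made up for this example.
 */
#if 0
static VBOXSTRICTRC emR3ExampleRunWithIem(PVMCPUCC pVCpu) /* hypothetical helper */
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu,
                                            0    /* fWillExit - still "to be defined" above */,
                                            8    /* cMinInstructions */,
                                            1024 /* cMaxInstructions */,
                                            512  /* cMaxInstructionsWithoutExits */,
                                            &Stats);
    LogFlow(("example: ins=%u exits=%u maxdist=%u -> %Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif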
10388
10389
10390/**
10391 * Injects a trap, fault, abort, software interrupt or external interrupt.
10392 *
10393 * The parameter list matches TRPMQueryTrapAll pretty closely.
10394 *
10395 * @returns Strict VBox status code.
10396 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10397 * @param u8TrapNo The trap number.
10398 * @param enmType What type is it (trap/fault/abort), software
10399 * interrupt or hardware interrupt.
10400 * @param uErrCode The error code if applicable.
10401 * @param uCr2 The CR2 value if applicable.
10402 * @param cbInstr The instruction length (only relevant for
10403 * software interrupts).
10404 */
10405VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10406 uint8_t cbInstr)
10407{
10408 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10409#ifdef DBGFTRACE_ENABLED
10410 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10411 u8TrapNo, enmType, uErrCode, uCr2);
10412#endif
10413
10414 uint32_t fFlags;
10415 switch (enmType)
10416 {
10417 case TRPM_HARDWARE_INT:
10418 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10419 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10420 uErrCode = uCr2 = 0;
10421 break;
10422
10423 case TRPM_SOFTWARE_INT:
10424 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10425 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10426 uErrCode = uCr2 = 0;
10427 break;
10428
10429 case TRPM_TRAP:
10430 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
10431 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10432 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10433 if (u8TrapNo == X86_XCPT_PF)
10434 fFlags |= IEM_XCPT_FLAGS_CR2;
10435 switch (u8TrapNo)
10436 {
10437 case X86_XCPT_DF:
10438 case X86_XCPT_TS:
10439 case X86_XCPT_NP:
10440 case X86_XCPT_SS:
10441 case X86_XCPT_PF:
10442 case X86_XCPT_AC:
10443 case X86_XCPT_GP:
10444 fFlags |= IEM_XCPT_FLAGS_ERR;
10445 break;
10446 }
10447 break;
10448
10449 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10450 }
10451
10452 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10453
10454 if (pVCpu->iem.s.cActiveMappings > 0)
10455 iemMemRollback(pVCpu);
10456
10457 return rcStrict;
10458}
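/*
 * Illustrative sketch (not part of the original source): injecting a page
 * fault via IEMInjectTrap.  The error-code bits and the fault address
 * (GCPtrFault) are hypothetical values chosen for the example.
 */
#if 0
    /* #PF is a CPU exception (TRPM_TRAP); the error code and CR2 are consumed
       because the vector is X86_XCPT_PF, while cbInstr only matters for
       software interrupts and can be zero here. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                                          X86_TRAP_PF_P | X86_TRAP_PF_RW /* uErrCode */,
                                          GCPtrFault /* uCr2 - hypothetical variable */,
                                          0 /* cbInstr */);
#endif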
10459
10460
10461/**
10462 * Injects the active TRPM event.
10463 *
10464 * @returns Strict VBox status code.
10465 * @param pVCpu The cross context virtual CPU structure.
10466 */
10467VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10468{
10469#ifndef IEM_IMPLEMENTS_TASKSWITCH
10470 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10471#else
10472 uint8_t u8TrapNo;
10473 TRPMEVENT enmType;
10474 uint32_t uErrCode;
10475 RTGCUINTPTR uCr2;
10476 uint8_t cbInstr;
10477 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10478 if (RT_FAILURE(rc))
10479 return rc;
10480
10481 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10482 * ICEBP \#DB injection as a special case. */
10483 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10484#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10485 if (rcStrict == VINF_SVM_VMEXIT)
10486 rcStrict = VINF_SUCCESS;
10487#endif
10488#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10489 if (rcStrict == VINF_VMX_VMEXIT)
10490 rcStrict = VINF_SUCCESS;
10491#endif
10492 /** @todo Are there any other codes that imply the event was successfully
10493 * delivered to the guest? See @bugref{6607}. */
10494 if ( rcStrict == VINF_SUCCESS
10495 || rcStrict == VINF_IEM_RAISED_XCPT)
10496 TRPMResetTrap(pVCpu);
10497
10498 return rcStrict;
10499#endif
10500}
10501
10502
10503VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10504{
10505 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10506 return VERR_NOT_IMPLEMENTED;
10507}
10508
10509
10510VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10511{
10512 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10513 return VERR_NOT_IMPLEMENTED;
10514}
10515
10516
10517/**
10518 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10519 *
10520 * This API ASSUMES that the caller has already verified that the guest code is
10521 * allowed to access the I/O port. (The I/O port is in the DX register in the
10522 * guest state.)
10523 *
10524 * @returns Strict VBox status code.
10525 * @param pVCpu The cross context virtual CPU structure.
10526 * @param cbValue The size of the I/O port access (1, 2, or 4).
10527 * @param enmAddrMode The addressing mode.
10528 * @param fRepPrefix Indicates whether a repeat prefix is used
10529 * (doesn't matter which for this instruction).
10530 * @param cbInstr The instruction length in bytes.
10531 * @param iEffSeg The effective segment register (X86_SREG_XXX).
10532 * @param fIoChecked Whether the access to the I/O port has been
10533 * checked or not. It's typically checked in the
10534 * HM scenario.
10535 */
10536VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10537 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10538{
10539 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10540 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10541
10542 /*
10543 * State init.
10544 */
10545 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10546
10547 /*
10548 * Switch orgy for getting to the right handler.
10549 */
10550 VBOXSTRICTRC rcStrict;
10551 if (fRepPrefix)
10552 {
10553 switch (enmAddrMode)
10554 {
10555 case IEMMODE_16BIT:
10556 switch (cbValue)
10557 {
10558 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10559 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10560 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10561 default:
10562 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10563 }
10564 break;
10565
10566 case IEMMODE_32BIT:
10567 switch (cbValue)
10568 {
10569 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10570 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10571 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10572 default:
10573 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10574 }
10575 break;
10576
10577 case IEMMODE_64BIT:
10578 switch (cbValue)
10579 {
10580 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10581 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10582 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10583 default:
10584 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10585 }
10586 break;
10587
10588 default:
10589 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10590 }
10591 }
10592 else
10593 {
10594 switch (enmAddrMode)
10595 {
10596 case IEMMODE_16BIT:
10597 switch (cbValue)
10598 {
10599 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10600 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10601 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10602 default:
10603 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10604 }
10605 break;
10606
10607 case IEMMODE_32BIT:
10608 switch (cbValue)
10609 {
10610 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10611 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10612 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10613 default:
10614 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10615 }
10616 break;
10617
10618 case IEMMODE_64BIT:
10619 switch (cbValue)
10620 {
10621 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10622 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10623 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10624 default:
10625 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10626 }
10627 break;
10628
10629 default:
10630 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10631 }
10632 }
10633
10634 if (pVCpu->iem.s.cActiveMappings)
10635 iemMemRollback(pVCpu);
10636
10637 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10638}
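/*
 * Illustrative sketch (not part of the original source): how an HM I/O
 * intercept handler might call IEMExecStringIoWrite for a "REP OUTSB" with a
 * 32-bit address size and the default DS segment (F3 6E, two bytes).
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /* cbValue */, IEMMODE_32BIT,
                                                 true /* fRepPrefix */, 2 /* cbInstr */,
                                                 X86_SREG_DS /* iEffSeg */,
                                                 true /* fIoChecked - done by HM in this scenario */);
#endif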
10639
10640
10641/**
10642 * Interface for HM and EM for executing string I/O IN (read) instructions.
10643 *
10644 * This API ASSUMES that the caller has already verified that the guest code is
10645 * allowed to access the I/O port. (The I/O port is in the DX register in the
10646 * guest state.)
10647 *
10648 * @returns Strict VBox status code.
10649 * @param pVCpu The cross context virtual CPU structure.
10650 * @param cbValue The size of the I/O port access (1, 2, or 4).
10651 * @param enmAddrMode The addressing mode.
10652 * @param fRepPrefix Indicates whether a repeat prefix is used
10653 * (doesn't matter which for this instruction).
10654 * @param cbInstr The instruction length in bytes.
10655 * @param fIoChecked Whether the access to the I/O port has been
10656 * checked or not. It's typically checked in the
10657 * HM scenario.
10658 */
10659VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10660 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10661{
10662 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10663
10664 /*
10665 * State init.
10666 */
10667 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10668
10669 /*
10670 * Switch orgy for getting to the right handler.
10671 */
10672 VBOXSTRICTRC rcStrict;
10673 if (fRepPrefix)
10674 {
10675 switch (enmAddrMode)
10676 {
10677 case IEMMODE_16BIT:
10678 switch (cbValue)
10679 {
10680 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10681 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10682 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10683 default:
10684 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10685 }
10686 break;
10687
10688 case IEMMODE_32BIT:
10689 switch (cbValue)
10690 {
10691 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10692 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10693 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10694 default:
10695 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10696 }
10697 break;
10698
10699 case IEMMODE_64BIT:
10700 switch (cbValue)
10701 {
10702 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10703 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10704 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10705 default:
10706 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10707 }
10708 break;
10709
10710 default:
10711 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10712 }
10713 }
10714 else
10715 {
10716 switch (enmAddrMode)
10717 {
10718 case IEMMODE_16BIT:
10719 switch (cbValue)
10720 {
10721 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10722 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10723 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10724 default:
10725 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10726 }
10727 break;
10728
10729 case IEMMODE_32BIT:
10730 switch (cbValue)
10731 {
10732 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10733 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10734 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10735 default:
10736 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10737 }
10738 break;
10739
10740 case IEMMODE_64BIT:
10741 switch (cbValue)
10742 {
10743 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10744 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10745 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10746 default:
10747 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10748 }
10749 break;
10750
10751 default:
10752 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10753 }
10754 }
10755
10756 if ( pVCpu->iem.s.cActiveMappings == 0
10757 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10758 { /* likely */ }
10759 else
10760 {
10761 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10762 iemMemRollback(pVCpu);
10763 }
10764 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10765}
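/*
 * Illustrative sketch (not part of the original source): the IN counterpart,
 * e.g. a plain "INSB" (6C, one byte) with a 16-bit address size; no effective
 * segment parameter is needed since INS always stores through ES:(E)DI.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu, 1 /* cbValue */, IEMMODE_16BIT,
                                                false /* fRepPrefix */, 1 /* cbInstr */,
                                                false /* fIoChecked */);
#endif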
10766
10767
10768/**
10769 * Interface for raw-mode to execute an OUT (write) instruction.
10770 *
10771 * @returns Strict VBox status code.
10772 * @param pVCpu The cross context virtual CPU structure.
10773 * @param cbInstr The instruction length in bytes.
10774 * @param u16Port The port to write to.
10775 * @param fImm Whether the port is specified using an immediate operand or
10776 * using the implicit DX register.
10777 * @param cbReg The register size.
10778 *
10779 * @remarks In ring-0 not all of the state needs to be synced in.
10780 */
10781VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10782{
10783 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10784 Assert(cbReg <= 4 && cbReg != 3);
10785
10786 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10787 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10788 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10789 Assert(!pVCpu->iem.s.cActiveMappings);
10790 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10791}
10792
10793
10794/**
10795 * Interface for raw-mode to execute an IN (read) instruction.
10796 *
10797 * @returns Strict VBox status code.
10798 * @param pVCpu The cross context virtual CPU structure.
10799 * @param cbInstr The instruction length in bytes.
10800 * @param u16Port The port to read.
10801 * @param fImm Whether the port is specified using an immediate operand or
10802 * using the implicit DX.
10803 * @param cbReg The register size.
10804 */
10805VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10806{
10807 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10808 Assert(cbReg <= 4 && cbReg != 3);
10809
10810 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10811 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10812 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10813 Assert(!pVCpu->iem.s.cActiveMappings);
10814 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10815}
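/*
 * Illustrative sketch (not part of the original source): the non-string
 * variants.  "OUT 80h, AL" uses the immediate port form (E6 80, two bytes),
 * while "IN EAX, DX" takes the port from DX (ED, one byte).
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 2 /* cbInstr */, 0x80 /* u16Port */,
                                              true /* fImm */, 1 /* cbReg */);
    rcStrict = IEMExecDecodedIn(pVCpu, 1 /* cbInstr */, (uint16_t)pVCpu->cpum.GstCtx.rdx /* u16Port */,
                                false /* fImm */, 4 /* cbReg */);
#endif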
10816
10817
10818/**
10819 * Interface for HM and EM to write to a CRx register.
10820 *
10821 * @returns Strict VBox status code.
10822 * @param pVCpu The cross context virtual CPU structure.
10823 * @param cbInstr The instruction length in bytes.
10824 * @param iCrReg The control register number (destination).
10825 * @param iGReg The general purpose register number (source).
10826 *
10827 * @remarks In ring-0 not all of the state needs to be synced in.
10828 */
10829VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10830{
10831 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10832 Assert(iCrReg < 16);
10833 Assert(iGReg < 16);
10834
10835 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10836 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10837 Assert(!pVCpu->iem.s.cActiveMappings);
10838 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10839}
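/*
 * Illustrative sketch (not part of the original source): forwarding an HM
 * intercept for "MOV CR4, RAX" (0F 22 E0, three bytes) to IEM.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /* cbInstr */,
                                                      4 /* iCrReg = CR4 */, X86_GREG_xAX /* iGReg */);
#endif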
10840
10841
10842/**
10843 * Interface for HM and EM to read from a CRx register.
10844 *
10845 * @returns Strict VBox status code.
10846 * @param pVCpu The cross context virtual CPU structure.
10847 * @param cbInstr The instruction length in bytes.
10848 * @param iGReg The general purpose register number (destination).
10849 * @param iCrReg The control register number (source).
10850 *
10851 * @remarks In ring-0 not all of the state needs to be synced in.
10852 */
10853VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10854{
10855 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10856 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10857 | CPUMCTX_EXTRN_APIC_TPR);
10858 Assert(iCrReg < 16);
10859 Assert(iGReg < 16);
10860
10861 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10862 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10863 Assert(!pVCpu->iem.s.cActiveMappings);
10864 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10865}
10866
10867
10868/**
10869 * Interface for HM and EM to write to a DRx register.
10870 *
10871 * @returns Strict VBox status code.
10872 * @param pVCpu The cross context virtual CPU structure.
10873 * @param cbInstr The instruction length in bytes.
10874 * @param iDrReg The debug register number (destination).
10875 * @param iGReg The general purpose register number (source).
10876 *
10877 * @remarks In ring-0 not all of the state needs to be synced in.
10878 */
10879VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10880{
10881 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10882 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10883 Assert(iDrReg < 8);
10884 Assert(iGReg < 16);
10885
10886 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10887 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10888 Assert(!pVCpu->iem.s.cActiveMappings);
10889 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10890}
10891
10892
10893/**
10894 * Interface for HM and EM to read from a DRx register.
10895 *
10896 * @returns Strict VBox status code.
10897 * @param pVCpu The cross context virtual CPU structure.
10898 * @param cbInstr The instruction length in bytes.
10899 * @param iGReg The general purpose register number (destination).
10900 * @param iDrReg The debug register number (source).
10901 *
10902 * @remarks In ring-0 not all of the state needs to be synced in.
10903 */
10904VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10905{
10906 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10907 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10908 Assert(iDrReg < 8);
10909 Assert(iGReg < 16);
10910
10911 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10912 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10913 Assert(!pVCpu->iem.s.cActiveMappings);
10914 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10915}
10916
10917
10918/**
10919 * Interface for HM and EM to clear the CR0[TS] bit.
10920 *
10921 * @returns Strict VBox status code.
10922 * @param pVCpu The cross context virtual CPU structure.
10923 * @param cbInstr The instruction length in bytes.
10924 *
10925 * @remarks In ring-0 not all of the state needs to be synced in.
10926 */
10927VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10928{
10929 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10930
10931 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10932 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10933 Assert(!pVCpu->iem.s.cActiveMappings);
10934 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10935}
10936
10937
10938/**
10939 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10940 *
10941 * @returns Strict VBox status code.
10942 * @param pVCpu The cross context virtual CPU structure.
10943 * @param cbInstr The instruction length in bytes.
10944 * @param uValue The value to load into CR0.
10945 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10946 * memory operand. Otherwise pass NIL_RTGCPTR.
10947 *
10948 * @remarks In ring-0 not all of the state needs to be synced in.
10949 */
10950VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10951{
10952 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10953
10954 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10955 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10956 Assert(!pVCpu->iem.s.cActiveMappings);
10957 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10958}
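/*
 * Illustrative sketch (not part of the original source): the register form
 * "LMSW AX" (0F 01 F0, three bytes) has no memory operand, so NIL_RTGCPTR is
 * passed for GCPtrEffDst.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, 3 /* cbInstr */,
                                               (uint16_t)pVCpu->cpum.GstCtx.rax /* uValue */, NIL_RTGCPTR);
#endif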
10959
10960
10961/**
10962 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10963 *
10964 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10965 *
10966 * @returns Strict VBox status code.
10967 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10968 * @param cbInstr The instruction length in bytes.
10969 * @remarks In ring-0 not all of the state needs to be synced in.
10970 * @thread EMT(pVCpu)
10971 */
10972VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10973{
10974 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10975
10976 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10977 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10978 Assert(!pVCpu->iem.s.cActiveMappings);
10979 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10980}
10981
10982
10983/**
10984 * Interface for HM and EM to emulate the WBINVD instruction.
10985 *
10986 * @returns Strict VBox status code.
10987 * @param pVCpu The cross context virtual CPU structure.
10988 * @param cbInstr The instruction length in bytes.
10989 *
10990 * @remarks In ring-0 not all of the state needs to be synced in.
10991 */
10992VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10993{
10994 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10995
10996 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10997 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10998 Assert(!pVCpu->iem.s.cActiveMappings);
10999 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11000}
11001
11002
11003/**
11004 * Interface for HM and EM to emulate the INVD instruction.
11005 *
11006 * @returns Strict VBox status code.
11007 * @param pVCpu The cross context virtual CPU structure.
11008 * @param cbInstr The instruction length in bytes.
11009 *
11010 * @remarks In ring-0 not all of the state needs to be synced in.
11011 */
11012VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11013{
11014 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11015
11016 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11017 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11018 Assert(!pVCpu->iem.s.cActiveMappings);
11019 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11020}
11021
11022
11023/**
11024 * Interface for HM and EM to emulate the INVLPG instruction.
11025 *
11026 * @returns Strict VBox status code.
11027 * @retval VINF_PGM_SYNC_CR3
11028 *
11029 * @param pVCpu The cross context virtual CPU structure.
11030 * @param cbInstr The instruction length in bytes.
11031 * @param GCPtrPage The effective address of the page to invalidate.
11032 *
11033 * @remarks In ring-0 not all of the state needs to be synced in.
11034 */
11035VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11036{
11037 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11038
11039 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11040 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11041 Assert(!pVCpu->iem.s.cActiveMappings);
11042 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11043}
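/*
 * Illustrative sketch (not part of the original source): an INVLPG intercept
 * handler passes informational codes like VINF_PGM_SYNC_CR3 back to its caller
 * rather than treating them as errors.  GCPtrPage is a hypothetical variable.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, 3 /* cbInstr, e.g. 0F 01 38 */, GCPtrPage);
    if (rcStrict == VINF_PGM_SYNC_CR3)
        LogFlow(("example: PGM needs to resync CR3 before resuming the guest\n"));
#endif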
11044
11045
11046/**
11047 * Interface for HM and EM to emulate the INVPCID instruction.
11048 *
11049 * @returns Strict VBox status code.
11050 * @retval VINF_PGM_SYNC_CR3
11051 *
11052 * @param pVCpu The cross context virtual CPU structure.
11053 * @param cbInstr The instruction length in bytes.
11054 * @param iEffSeg The effective segment register.
11055 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11056 * @param uType The invalidation type.
11057 *
11058 * @remarks In ring-0 not all of the state needs to be synced in.
11059 */
11060VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11061 uint64_t uType)
11062{
11063 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11064
11065 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11066 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11067 Assert(!pVCpu->iem.s.cActiveMappings);
11068 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11069}
11070
11071
11072/**
11073 * Interface for HM and EM to emulate the CPUID instruction.
11074 *
11075 * @returns Strict VBox status code.
11076 *
11077 * @param pVCpu The cross context virtual CPU structure.
11078 * @param cbInstr The instruction length in bytes.
11079 *
11080 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
11081 */
11082VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11083{
11084 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11085 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11086
11087 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11088 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11089 Assert(!pVCpu->iem.s.cActiveMappings);
11090 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11091}
11092
11093
11094/**
11095 * Interface for HM and EM to emulate the RDPMC instruction.
11096 *
11097 * @returns Strict VBox status code.
11098 *
11099 * @param pVCpu The cross context virtual CPU structure.
11100 * @param cbInstr The instruction length in bytes.
11101 *
11102 * @remarks Not all of the state needs to be synced in.
11103 */
11104VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11105{
11106 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11107 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11108
11109 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11110 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11111 Assert(!pVCpu->iem.s.cActiveMappings);
11112 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11113}
11114
11115
11116/**
11117 * Interface for HM and EM to emulate the RDTSC instruction.
11118 *
11119 * @returns Strict VBox status code.
11120 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11121 *
11122 * @param pVCpu The cross context virtual CPU structure.
11123 * @param cbInstr The instruction length in bytes.
11124 *
11125 * @remarks Not all of the state needs to be synced in.
11126 */
11127VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11128{
11129 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11130 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11131
11132 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11133 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11134 Assert(!pVCpu->iem.s.cActiveMappings);
11135 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11136}
11137
11138
11139/**
11140 * Interface for HM and EM to emulate the RDTSCP instruction.
11141 *
11142 * @returns Strict VBox status code.
11143 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11144 *
11145 * @param pVCpu The cross context virtual CPU structure.
11146 * @param cbInstr The instruction length in bytes.
11147 *
11148 * @remarks Not all of the state needs to be synced in. Recommended to
11149 * include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11150 */
11151VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11152{
11153 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11154 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11155
11156 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11157 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11158 Assert(!pVCpu->iem.s.cActiveMappings);
11159 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11160}
11161
11162
11163/**
11164 * Interface for HM and EM to emulate the RDMSR instruction.
11165 *
11166 * @returns Strict VBox status code.
11167 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11168 *
11169 * @param pVCpu The cross context virtual CPU structure.
11170 * @param cbInstr The instruction length in bytes.
11171 *
11172 * @remarks Not all of the state needs to be synced in. Requires RCX and
11173 * (currently) all MSRs.
11174 */
11175VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11176{
11177 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11178 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11179
11180 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11181 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11182 Assert(!pVCpu->iem.s.cActiveMappings);
11183 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11184}
11185
11186
11187/**
11188 * Interface for HM and EM to emulate the WRMSR instruction.
11189 *
11190 * @returns Strict VBox status code.
11191 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11192 *
11193 * @param pVCpu The cross context virtual CPU structure.
11194 * @param cbInstr The instruction length in bytes.
11195 *
11196 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11197 * and (currently) all MSRs.
11198 */
11199VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11200{
11201 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11202 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11203 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11204
11205 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11206 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11207 Assert(!pVCpu->iem.s.cActiveMappings);
11208 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11209}
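/*
 * Illustrative sketch (not part of the original source): a WRMSR intercept
 * (0F 30, two bytes).  ECX holds the MSR index and EDX:EAX the value, which is
 * why those registers appear in the context assertion above.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, 2 /* cbInstr */);
#endif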
11210
11211
11212/**
11213 * Interface for HM and EM to emulate the MONITOR instruction.
11214 *
11215 * @returns Strict VBox status code.
11216 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11217 *
11218 * @param pVCpu The cross context virtual CPU structure.
11219 * @param cbInstr The instruction length in bytes.
11220 *
11221 * @remarks Not all of the state needs to be synced in.
11222 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11223 * are used.
11224 */
11225VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11226{
11227 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11228 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11229
11230 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11231 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11232 Assert(!pVCpu->iem.s.cActiveMappings);
11233 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11234}
11235
11236
11237/**
11238 * Interface for HM and EM to emulate the MWAIT instruction.
11239 *
11240 * @returns Strict VBox status code.
11241 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11242 *
11243 * @param pVCpu The cross context virtual CPU structure.
11244 * @param cbInstr The instruction length in bytes.
11245 *
11246 * @remarks Not all of the state needs to be synced in.
11247 */
11248VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11249{
11250 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11251 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11252
11253 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11254 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11255 Assert(!pVCpu->iem.s.cActiveMappings);
11256 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11257}
11258
11259
11260/**
11261 * Interface for HM and EM to emulate the HLT instruction.
11262 *
11263 * @returns Strict VBox status code.
11264 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11265 *
11266 * @param pVCpu The cross context virtual CPU structure.
11267 * @param cbInstr The instruction length in bytes.
11268 *
11269 * @remarks Not all of the state needs to be synced in.
11270 */
11271VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11272{
11273 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11274
11275 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11276 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11277 Assert(!pVCpu->iem.s.cActiveMappings);
11278 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11279}
11280
11281
11282/**
11283 * Checks if IEM is in the process of delivering an event (interrupt or
11284 * exception).
11285 *
11286 * @returns true if we're in the process of raising an interrupt or exception,
11287 * false otherwise.
11288 * @param pVCpu The cross context virtual CPU structure.
11289 * @param puVector Where to store the vector associated with the
11290 * currently delivered event, optional.
11291 * @param pfFlags Where to store the event delivery flags (see
11292 * IEM_XCPT_FLAGS_XXX), optional.
11293 * @param puErr Where to store the error code associated with the
11294 * event, optional.
11295 * @param puCr2 Where to store the CR2 associated with the event,
11296 * optional.
11297 * @remarks The caller should check the flags to determine if the error code and
11298 * CR2 are valid for the event.
11299 */
11300VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11301{
11302 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11303 if (fRaisingXcpt)
11304 {
11305 if (puVector)
11306 *puVector = pVCpu->iem.s.uCurXcpt;
11307 if (pfFlags)
11308 *pfFlags = pVCpu->iem.s.fCurXcpt;
11309 if (puErr)
11310 *puErr = pVCpu->iem.s.uCurXcptErr;
11311 if (puCr2)
11312 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11313 }
11314 return fRaisingXcpt;
11315}
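/*
 * Illustrative sketch (not part of the original source): querying the event
 * currently being delivered and checking the flags before trusting the error
 * code and CR2 values.
 */
#if 0
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErrCode;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, &uCr2))
    {
        bool const fHasErrCode = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
        bool const fHasCr2     = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
        LogFlow(("example: delivering vector %#x (err valid=%d cr2 valid=%d)\n", uVector, fHasErrCode, fHasCr2));
    }
#endif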
11316
11317#ifdef IN_RING3
11318
11319/**
11320 * Handles the unlikely and probably fatal merge cases.
11321 *
11322 * @returns Merged status code.
11323 * @param rcStrict Current EM status code.
11324 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11325 * with @a rcStrict.
11326 * @param iMemMap The memory mapping index. For error reporting only.
11327 * @param pVCpu The cross context virtual CPU structure of the calling
11328 * thread, for error reporting only.
11329 */
11330DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11331 unsigned iMemMap, PVMCPUCC pVCpu)
11332{
11333 if (RT_FAILURE_NP(rcStrict))
11334 return rcStrict;
11335
11336 if (RT_FAILURE_NP(rcStrictCommit))
11337 return rcStrictCommit;
11338
11339 if (rcStrict == rcStrictCommit)
11340 return rcStrictCommit;
11341
11342 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11343 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11344 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11345 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11346 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11347 return VERR_IOM_FF_STATUS_IPE;
11348}
11349
11350
11351/**
11352 * Helper for IOMR3ProcessForceFlag.
11353 *
11354 * @returns Merged status code.
11355 * @param rcStrict Current EM status code.
11356 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11357 * with @a rcStrict.
11358 * @param iMemMap The memory mapping index. For error reporting only.
11359 * @param pVCpu The cross context virtual CPU structure of the calling
11360 * thread, for error reporting only.
11361 */
11362DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11363{
11364 /* Simple. */
11365 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11366 return rcStrictCommit;
11367
11368 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11369 return rcStrict;
11370
11371 /* EM scheduling status codes. */
11372 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11373 && rcStrict <= VINF_EM_LAST))
11374 {
11375 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11376 && rcStrictCommit <= VINF_EM_LAST))
11377 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11378 }
11379
11380 /* Unlikely */
11381 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11382}
11383
11384
11385/**
11386 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11387 *
11388 * @returns Merge between @a rcStrict and what the commit operation returned.
11389 * @param pVM The cross context VM structure.
11390 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11391 * @param rcStrict The status code returned by ring-0 or raw-mode.
11392 */
11393VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11394{
11395 /*
11396 * Reset the pending commit.
11397 */
11398 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11399 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11400 ("%#x %#x %#x\n",
11401 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11402 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11403
11404 /*
11405 * Commit the pending bounce buffers (usually just one).
11406 */
11407 unsigned cBufs = 0;
11408 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11409 while (iMemMap-- > 0)
11410 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11411 {
11412 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11413 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11414 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11415
11416 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11417 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11418 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11419
11420 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11421 {
11422 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11423 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11424 pbBuf,
11425 cbFirst,
11426 PGMACCESSORIGIN_IEM);
11427 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11428 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11429 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11430 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11431 }
11432
11433 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11434 {
11435 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11436 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11437 pbBuf + cbFirst,
11438 cbSecond,
11439 PGMACCESSORIGIN_IEM);
11440 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11441 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11442 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11443 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11444 }
11445 cBufs++;
11446 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11447 }
11448
11449 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11450 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11451 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11452 pVCpu->iem.s.cActiveMappings = 0;
11453 return rcStrict;
11454}
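/*
 * Illustrative sketch (not part of the original source): the typical shape of
 * the ring-3 force-flag processing call site, merging the commit status with
 * whatever ring-0/raw-mode returned.
 */
#if 0
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif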
11455
11456#endif /* IN_RING3 */
11457