VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@105592

Last change on this file since 105592 was 105592, checked in by vboxsync, 4 months ago

VMM/IEM: Removed ad-hoc TLB trace statement in iemTlbInvalidateLargePageWorkerInner. bugref:10727

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 461.4 KB
1/* $Id: IEMAll.cpp 105592 2024-08-05 23:13:46Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
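 *
 * (Illustrative aside, assumed host-side syntax rather than anything defined in
 * this file: these levels map onto the IPRT log group-settings string, so a
 * debug-logger setting along the lines of VBOX_LOG=+iem.e.f.l2.l3 would turn
 * on Log, LogFlow, Log2 and Log3 for this group.)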
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * This will also invalidate TLB entries for any pages with active data
202 * breakpoints on them.
203 *
204 * @returns IEM_F_BRK_PENDING_XXX or zero.
205 * @param pVCpu The cross context virtual CPU structure of the
206 * calling thread.
207 *
208 * @note Don't call directly, use iemCalcExecDbgFlags instead.
209 */
210uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
211{
212 uint32_t fExec = 0;
213
214 /*
215 * Helper for invalidating the data TLB for breakpoint addresses.
216 *
217 * This is to make sure any access to the page will always trigger a TLB
218 * load for as long as the breakpoint is enabled.
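 *
 * Note: the even slot of each TLB entry pair holds the non-global entry and
 * the odd slot the global one, which is why the macro checks both below.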
219 */
220#ifdef IEM_WITH_DATA_TLB
221# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
222 RTGCPTR uTagNoRev = (a_uValue); \
223 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
224 /** @todo do large page accounting */ \
225 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
226 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
227 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
228 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
229 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
230 } while (0)
231#else
232# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
233#endif
234
235 /*
236 * Process guest breakpoints.
237 */
238#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
239 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
240 { \
241 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
242 { \
243 case X86_DR7_RW_EO: \
244 fExec |= IEM_F_PENDING_BRK_INSTR; \
245 break; \
246 case X86_DR7_RW_WO: \
247 case X86_DR7_RW_RW: \
248 fExec |= IEM_F_PENDING_BRK_DATA; \
249 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
250 break; \
251 case X86_DR7_RW_IO: \
252 fExec |= IEM_F_PENDING_BRK_X86_IO; \
253 break; \
254 } \
255 } \
256 } while (0)
257
258 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
259 if (fGstDr7 & X86_DR7_ENABLED_MASK)
260 {
261/** @todo extract more details here to simplify matching later. */
262#ifdef IEM_WITH_DATA_TLB
263 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
264#endif
265 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
266 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
267 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
268 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
269 }
270
271 /*
272 * Process hypervisor breakpoints.
273 */
274 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
275 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
276 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
277 {
278/** @todo extract more details here to simplify matching later. */
279 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
280 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
281 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
282 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
283 }
284
285 return fExec;
286}
287
288
289/**
290 * Initializes the decoder state.
291 *
292 * iemReInitDecoder is mostly a copy of this function.
293 *
294 * @param pVCpu The cross context virtual CPU structure of the
295 * calling thread.
296 * @param fExecOpts Optional execution flags:
297 * - IEM_F_BYPASS_HANDLERS
298 * - IEM_F_X86_DISREGARD_LOCK
299 */
300DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
301{
302 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
303 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
312
313 /* Execution state: */
314 uint32_t fExec;
315 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
316
317 /* Decoder state: */
318 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
319 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
320 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
321 {
322 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
323 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
324 }
325 else
326 {
327 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
328 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
329 }
330 pVCpu->iem.s.fPrefixes = 0;
331 pVCpu->iem.s.uRexReg = 0;
332 pVCpu->iem.s.uRexB = 0;
333 pVCpu->iem.s.uRexIndex = 0;
334 pVCpu->iem.s.idxPrefix = 0;
335 pVCpu->iem.s.uVex3rdReg = 0;
336 pVCpu->iem.s.uVexLength = 0;
337 pVCpu->iem.s.fEvexStuff = 0;
338 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
339#ifdef IEM_WITH_CODE_TLB
340 pVCpu->iem.s.pbInstrBuf = NULL;
341 pVCpu->iem.s.offInstrNextByte = 0;
342 pVCpu->iem.s.offCurInstrStart = 0;
343# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
344 pVCpu->iem.s.offOpcode = 0;
345# endif
346# ifdef VBOX_STRICT
347 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
348 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
349 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
350 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
351# endif
352#else
353 pVCpu->iem.s.offOpcode = 0;
354 pVCpu->iem.s.cbOpcode = 0;
355#endif
356 pVCpu->iem.s.offModRm = 0;
357 pVCpu->iem.s.cActiveMappings = 0;
358 pVCpu->iem.s.iNextMapping = 0;
359 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
360
361#ifdef DBGFTRACE_ENABLED
362 switch (IEM_GET_CPU_MODE(pVCpu))
363 {
364 case IEMMODE_64BIT:
365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
366 break;
367 case IEMMODE_32BIT:
368 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
369 break;
370 case IEMMODE_16BIT:
371 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
372 break;
373 }
374#endif
375}
376
377
378/**
379 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
380 *
381 * This is mostly a copy of iemInitDecoder.
382 *
383 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
384 */
385DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
386{
387 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
396
397 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
398 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
399 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
400
401 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
402 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
403 pVCpu->iem.s.enmEffAddrMode = enmMode;
404 if (enmMode != IEMMODE_64BIT)
405 {
406 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
407 pVCpu->iem.s.enmEffOpSize = enmMode;
408 }
409 else
410 {
411 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
412 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
413 }
414 pVCpu->iem.s.fPrefixes = 0;
415 pVCpu->iem.s.uRexReg = 0;
416 pVCpu->iem.s.uRexB = 0;
417 pVCpu->iem.s.uRexIndex = 0;
418 pVCpu->iem.s.idxPrefix = 0;
419 pVCpu->iem.s.uVex3rdReg = 0;
420 pVCpu->iem.s.uVexLength = 0;
421 pVCpu->iem.s.fEvexStuff = 0;
422 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
423#ifdef IEM_WITH_CODE_TLB
424 if (pVCpu->iem.s.pbInstrBuf)
425 {
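 /* If the new RIP still falls inside the previously mapped instruction buffer,
 just reposition the read window; otherwise drop the buffer so the next
 opcode fetch reloads it via the code TLB. */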
426 uint64_t off = (enmMode == IEMMODE_64BIT
427 ? pVCpu->cpum.GstCtx.rip
428 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
429 - pVCpu->iem.s.uInstrBufPc;
430 if (off < pVCpu->iem.s.cbInstrBufTotal)
431 {
432 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
433 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
434 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
435 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
436 else
437 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
438 }
439 else
440 {
441 pVCpu->iem.s.pbInstrBuf = NULL;
442 pVCpu->iem.s.offInstrNextByte = 0;
443 pVCpu->iem.s.offCurInstrStart = 0;
444 pVCpu->iem.s.cbInstrBuf = 0;
445 pVCpu->iem.s.cbInstrBufTotal = 0;
446 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
447 }
448 }
449 else
450 {
451 pVCpu->iem.s.offInstrNextByte = 0;
452 pVCpu->iem.s.offCurInstrStart = 0;
453 pVCpu->iem.s.cbInstrBuf = 0;
454 pVCpu->iem.s.cbInstrBufTotal = 0;
455# ifdef VBOX_STRICT
456 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
457# endif
458 }
459# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
460 pVCpu->iem.s.offOpcode = 0;
461# endif
462#else /* !IEM_WITH_CODE_TLB */
463 pVCpu->iem.s.cbOpcode = 0;
464 pVCpu->iem.s.offOpcode = 0;
465#endif /* !IEM_WITH_CODE_TLB */
466 pVCpu->iem.s.offModRm = 0;
467 Assert(pVCpu->iem.s.cActiveMappings == 0);
468 pVCpu->iem.s.iNextMapping = 0;
469 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
470 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
471
472#ifdef DBGFTRACE_ENABLED
473 switch (enmMode)
474 {
475 case IEMMODE_64BIT:
476 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
477 break;
478 case IEMMODE_32BIT:
479 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
480 break;
481 case IEMMODE_16BIT:
482 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
483 break;
484 }
485#endif
486}
487
488
489
490/**
491 * Prefetches opcodes the first time we start executing.
492 *
493 * @returns Strict VBox status code.
494 * @param pVCpu The cross context virtual CPU structure of the
495 * calling thread.
496 * @param fExecOpts Optional execution flags:
497 * - IEM_F_BYPASS_HANDLERS
498 * - IEM_F_X86_DISREGARD_LOCK
499 */
500static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
501{
502 iemInitDecoder(pVCpu, fExecOpts);
503
504#ifndef IEM_WITH_CODE_TLB
505 /*
506 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
507 *
508 * First translate CS:rIP to a physical address.
509 *
510 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
511 * all relevant bytes from the first page, as it ASSUMES it's only ever
512 * called for dealing with CS.LIM, page crossing and instructions that
513 * are too long.
514 */
515 uint32_t cbToTryRead;
516 RTGCPTR GCPtrPC;
517 if (IEM_IS_64BIT_CODE(pVCpu))
518 {
519 cbToTryRead = GUEST_PAGE_SIZE;
520 GCPtrPC = pVCpu->cpum.GstCtx.rip;
521 if (IEM_IS_CANONICAL(GCPtrPC))
522 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
523 else
524 return iemRaiseGeneralProtectionFault0(pVCpu);
525 }
526 else
527 {
528 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
529 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
530 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
531 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
532 else
533 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
534 if (cbToTryRead) { /* likely */ }
535 else /* overflowed */
536 {
537 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
538 cbToTryRead = UINT32_MAX;
539 }
540 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
541 Assert(GCPtrPC <= UINT32_MAX);
542 }
543
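 /* Walk the guest page tables for CS:rIP, requesting execute access (and user
 access when the guest is at CPL 3). */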
544 PGMPTWALKFAST WalkFast;
545 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
546 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
547 &WalkFast);
548 if (RT_SUCCESS(rc))
549 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
550 else
551 {
552 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
553# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
554/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
555 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
556 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
557 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
558# endif
559 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
560 }
561#if 0
562 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
563 else
564 {
565 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
566# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
567/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
568# error completely wrong
569 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
570 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
571# endif
572 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
573 }
574 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
575 else
576 {
577 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
578# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
579/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
580# error completely wrong.
581 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
582 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
583# endif
584 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
585 }
586#else
587 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
588 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
589#endif
590 RTGCPHYS const GCPhys = WalkFast.GCPhys;
591
592 /*
593 * Read the bytes at this address.
594 */
595 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
596 if (cbToTryRead > cbLeftOnPage)
597 cbToTryRead = cbLeftOnPage;
598 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
599 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
600
601 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
602 {
603 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
604 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
605 { /* likely */ }
606 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
607 {
608 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
609 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
610 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
611 }
612 else
613 {
614 Log((RT_SUCCESS(rcStrict)
615 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
616 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
617 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
618 return rcStrict;
619 }
620 }
621 else
622 {
623 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
624 if (RT_SUCCESS(rc))
625 { /* likely */ }
626 else
627 {
628 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
629 GCPtrPC, GCPhys, rc, cbToTryRead));
630 return rc;
631 }
632 }
633 pVCpu->iem.s.cbOpcode = cbToTryRead;
634#endif /* !IEM_WITH_CODE_TLB */
635 return VINF_SUCCESS;
636}
637
638
639#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
640/**
641 * Helper for doing large page accounting at TLB load time.
642 */
643template<bool const a_fGlobal>
644DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)
645{
646 if (a_fGlobal)
647 pTlb->cTlbGlobalLargePageCurLoads++;
648 else
649 pTlb->cTlbNonGlobalLargePageCurLoads++;
650
651 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
652 uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;
653 IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal
654 ? &pTlb->GlobalLargePageRange
655 : &pTlb->NonGlobalLargePageRange;
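 /* Widen the tracked [uFirstTag, uLastTag] range so it covers the whole large
 page, letting the invalidation code know when a large-page scan is needed. */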
656 uTagNoRev &= ~(RTGCPTR)fMask;
657 if (uTagNoRev < pRange->uFirstTag)
658 pRange->uFirstTag = uTagNoRev;
659
660 uTagNoRev |= fMask;
661 if (uTagNoRev > pRange->uLastTag)
662 pRange->uLastTag = uTagNoRev;
663}
664#endif
665
666
667#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
668/**
669 * Worker for iemTlbInvalidateAll.
670 */
671template<bool a_fGlobal>
672DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
673{
674 if (!a_fGlobal)
675 pTlb->cTlsFlushes++;
676 else
677 pTlb->cTlsGlobalFlushes++;
678
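 /* Bumping the revision implicitly invalidates every non-global entry tagged
 with an older revision; only on the rare rollover below must the even
 (non-global) slots actually be wiped. */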
679 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
680 if (RT_LIKELY(pTlb->uTlbRevision != 0))
681 { /* very likely */ }
682 else
683 {
684 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
685 pTlb->cTlbRevisionRollovers++;
686 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
687 while (i-- > 0)
688 pTlb->aEntries[i * 2].uTag = 0;
689 }
690
691 pTlb->cTlbNonGlobalLargePageCurLoads = 0;
692 pTlb->NonGlobalLargePageRange.uLastTag = 0;
693 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
694
695 if (a_fGlobal)
696 {
697 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
698 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
699 { /* very likely */ }
700 else
701 {
702 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
703 pTlb->cTlbRevisionRollovers++;
704 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
705 while (i-- > 0)
706 pTlb->aEntries[i * 2 + 1].uTag = 0;
707 }
708
709 pTlb->cTlbGlobalLargePageCurLoads = 0;
710 pTlb->GlobalLargePageRange.uLastTag = 0;
711 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
712 }
713}
714#endif
715
716
717/**
718 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
719 */
720template<bool a_fGlobal>
721DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
722{
723#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
724 Log10(("IEMTlbInvalidateAll\n"));
725
726# ifdef IEM_WITH_CODE_TLB
727 pVCpu->iem.s.cbInstrBufTotal = 0;
728 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
729 if (a_fGlobal)
730 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
731 else
732 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
733# endif
734
735# ifdef IEM_WITH_DATA_TLB
736 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
737 if (a_fGlobal)
738 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
739 else
740 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
741# endif
742#else
743 RT_NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the non-global IEM TLB entries.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVCpu The cross context virtual CPU structure of the calling
754 * thread.
755 */
756VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
757{
758 iemTlbInvalidateAll<false>(pVCpu);
759}
760
761
762/**
763 * Invalidates all the IEM TLB entries.
764 *
765 * This is called internally as well as by PGM when moving GC mappings.
766 *
767 * @param pVCpu The cross context virtual CPU structure of the calling
768 * thread.
769 */
770VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
771{
772 iemTlbInvalidateAll<true>(pVCpu);
773}
774
775
776#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
777
778template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>
779DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag)
780{
781 IEMTLBTRACE_LARGE_SCAN(pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb);
782
783 /*
784 * Combine TAG values with the TLB revisions.
785 */
786 RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;
787 if (a_fNonGlobal)
788 GCPtrTag |= pTlb->uTlbRevision;
789
790 /*
791 * Set up the scan.
792 *
793 * GCPtrTagMask: A 2MB page consists of 512 4K pages, so a 256-entry TLB maps
794 * offset zero and offset 1MB to the same slot pair. Our GCPtrTag[Glob]
795 * values are for the range 0-1MB, or slots 0-256. So, we construct a mask
796 * that folds large page offsets 1MB-2MB into the 0-1MB range.
797 *
798 * For our example with 2MB pages and a 256-entry TLB: 0xfffffffffffffeff
799 */
800 bool const fPartialScan = IEMTLB_ENTRY_COUNT > (a_f2MbLargePage ? 512 : 1024);
801 uintptr_t idxEven = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;
802 uintptr_t const idxEvenEnd = fPartialScan ? idxEven + ((a_f2MbLargePage ? 512 : 1024) * 2) : IEMTLB_ENTRY_COUNT * 2;
803 RTGCPTR const GCPtrTagMask = fPartialScan ? ~(RTGCPTR)0
804 : ~(RTGCPTR)( (RT_BIT_32(a_f2MbLargePage ? 9 : 10) - 1U)
805 & ~(uint32_t)(RT_BIT_32(IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U));
806
807 /*
808 * Do the scanning.
809 */
810 for (; idxEven < idxEvenEnd; idxEven += 2)
811 {
812 if (a_fNonGlobal)
813 {
814 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag)
815 {
816 if (pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
817 {
818 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven].GCPhys, idxEven, a_fDataTlb);
819 pTlb->aEntries[idxEven].uTag = 0;
820 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
821 pVCpu->iem.s.cbInstrBufTotal = 0;
822 }
823 }
824 GCPtrTag++;
825 }
826
827 if (a_fGlobal)
828 {
829 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == GCPtrTagGlob)
830 {
831 if (pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
832 {
833 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven + 1].GCPhys, idxEven + 1, a_fDataTlb);
834 pTlb->aEntries[idxEven + 1].uTag = 0;
835 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
836 pVCpu->iem.s.cbInstrBufTotal = 0;
837 }
838 }
839 GCPtrTagGlob++;
840 }
841 }
842
843}
844
845template<bool const a_fDataTlb, bool const a_f2MbLargePage>
846DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag)
847{
848 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
849
850 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);
851 if ( GCPtrTag >= pTlb->GlobalLargePageRange.uFirstTag
852 && GCPtrTag <= pTlb->GlobalLargePageRange.uLastTag)
853 {
854 if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
855 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
856 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
857 else
858 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
859 }
860 else if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
861 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
862 {
863 /* Large pages aren't as likely in the non-global TLB half. */
864 IEMTLBTRACE_LARGE_SCAN(pVCpu, false, false, a_fDataTlb);
865 }
866 else
867 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
868}
869
870template<bool const a_fDataTlb>
871DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven)
872{
873 /*
874 * Flush the entry pair.
875 */
876 if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
877 {
878 IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven].GCPhys, idxEven, a_fDataTlb);
879 pTlb->aEntries[idxEven].uTag = 0;
880 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
881 pVCpu->iem.s.cbInstrBufTotal = 0;
882 }
883 if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
884 {
885 IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven + 1].GCPhys, idxEven + 1, a_fDataTlb);
886 pTlb->aEntries[idxEven + 1].uTag = 0;
887 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
888 pVCpu->iem.s.cbInstrBufTotal = 0;
889 }
890
891 /*
892 * If there are (or has been) large pages in the TLB, we must check if the
893 * address being flushed may involve one of those, as then we'd have to
894 * scan for entries relating to the same page and flush those as well.
895 */
896# if 0 /** @todo do accurate counts or currently loaded large stuff and we can use those */
897 if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)
898# else
899 if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)
900# endif
901 {
902 RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);
903 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
904 iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
905 else
906 iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
907 }
908}
909
910#endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */
911
912/**
913 * Invalidates a page in the TLBs.
914 *
915 * @param pVCpu The cross context virtual CPU structure of the calling
916 * thread.
917 * @param GCPtr The address of the page to invalidate
918 * @thread EMT(pVCpu)
919 */
920VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
921{
922 IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
923#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
924 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
925 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
926 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
927 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
928
929# ifdef IEM_WITH_CODE_TLB
930 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
931# endif
932# ifdef IEM_WITH_DATA_TLB
933 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
934# endif
935#else
936 NOREF(pVCpu); NOREF(GCPtr);
937#endif
938}
939
940
941#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
942/**
943 * Invalidates both TLBs the slow way following a rollover.
944 *
945 * Worker for IEMTlbInvalidateAllPhysical,
946 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
947 * iemMemMapJmp and others.
948 *
949 * @thread EMT(pVCpu)
950 */
951static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
952{
953 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
954 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
955 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
956
957 unsigned i;
958# ifdef IEM_WITH_CODE_TLB
959 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
960 while (i-- > 0)
961 {
962 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
963 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
964 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
965 }
966 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
967 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
968# endif
969# ifdef IEM_WITH_DATA_TLB
970 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
971 while (i-- > 0)
972 {
973 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
974 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
975 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
976 }
977 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
978 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
979# endif
980
981}
982#endif
983
984
985/**
986 * Invalidates the host physical aspects of the IEM TLBs.
987 *
988 * This is called internally as well as by PGM when moving GC mappings.
989 *
990 * @param pVCpu The cross context virtual CPU structure of the calling
991 * thread.
992 * @note Currently not used.
993 */
994VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
995{
996#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
997 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
998 Log10(("IEMTlbInvalidateAllPhysical\n"));
999
1000# ifdef IEM_WITH_CODE_TLB
1001 pVCpu->iem.s.cbInstrBufTotal = 0;
1002# endif
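 /* Advance the physical revision shared by both TLBs; the unlikely wrap-around
 case has to scrub every entry via the slow path. */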
1003 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1004 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
1005 {
1006 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1007 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1008 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1009 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1010 }
1011 else
1012 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1013#else
1014 NOREF(pVCpu);
1015#endif
1016}
1017
1018
1019/**
1020 * Invalidates the host physical aspects of the IEM TLBs.
1021 *
1022 * This is called internally as well as by PGM when moving GC mappings.
1023 *
1024 * @param pVM The cross context VM structure.
1025 * @param idCpuCaller The ID of the calling EMT if available to the caller,
1026 * otherwise NIL_VMCPUID.
1027 * @param enmReason The reason we're called.
1028 *
1029 * @remarks Caller holds the PGM lock.
1030 */
1031VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
1032{
1033#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1034 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
1035 if (pVCpuCaller)
1036 VMCPU_ASSERT_EMT(pVCpuCaller);
1037 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
1038
1039 VMCC_FOR_EACH_VMCPU(pVM)
1040 {
1041# ifdef IEM_WITH_CODE_TLB
1042 if (pVCpuCaller == pVCpu)
1043 pVCpu->iem.s.cbInstrBufTotal = 0;
1044# endif
1045
1046 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
1047 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
1048 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
1049 { /* likely */}
1050 else if (pVCpuCaller != pVCpu)
1051 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
1052 else
1053 {
1054 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1055 continue;
1056 }
1057 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1058 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1059
1060 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1061 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1062 }
1063 VMCC_FOR_EACH_VMCPU_END(pVM);
1064
1065#else
1066 RT_NOREF(pVM, idCpuCaller, enmReason);
1067#endif
1068}
1069
1070
1071/**
1072 * Flushes the prefetch buffer, light version.
1073 */
1074void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
1075{
1076#ifndef IEM_WITH_CODE_TLB
1077 pVCpu->iem.s.cbOpcode = cbInstr;
1078#else
1079 RT_NOREF(pVCpu, cbInstr);
1080#endif
1081}
1082
1083
1084/**
1085 * Flushes the prefetch buffer, heavy version.
1086 */
1087void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
1088{
1089#ifndef IEM_WITH_CODE_TLB
1090 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
1091#elif 1
1092 pVCpu->iem.s.cbInstrBufTotal = 0;
1093 RT_NOREF(cbInstr);
1094#else
1095 RT_NOREF(pVCpu, cbInstr);
1096#endif
1097}
1098
1099
1100
1101#ifdef IEM_WITH_CODE_TLB
1102
1103/**
1104 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1105 * jumping on failure.
1106 *
1107 * We end up here for a number of reasons:
1108 * - pbInstrBuf isn't yet initialized.
1109 * - Advancing beyond the buffer boundary (e.g. cross page).
1110 * - Advancing beyond the CS segment limit.
1111 * - Fetching from non-mappable page (e.g. MMIO).
1112 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
1113 *
1114 * @param pVCpu The cross context virtual CPU structure of the
1115 * calling thread.
1116 * @param pvDst Where to return the bytes.
1117 * @param cbDst Number of bytes to read. A value of zero is
1118 * allowed for initializing pbInstrBuf (the
1119 * recompiler does this). In this case it is best
1120 * to set pbInstrBuf to NULL prior to the call.
1121 */
1122void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
1123{
1124# ifdef IN_RING3
1125 for (;;)
1126 {
1127 Assert(cbDst <= 8);
1128 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1129
1130 /*
1131 * We might have a partial buffer match; deal with that first to make the
1132 * rest simpler. This is the first part of the cross page/buffer case.
1133 */
1134 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
1135 if (pbInstrBuf != NULL)
1136 {
1137 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
1138 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
1139 if (offBuf < cbInstrBuf)
1140 {
1141 Assert(offBuf + cbDst > cbInstrBuf);
1142 uint32_t const cbCopy = cbInstrBuf - offBuf;
1143 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
1144
1145 cbDst -= cbCopy;
1146 pvDst = (uint8_t *)pvDst + cbCopy;
1147 offBuf += cbCopy;
1148 }
1149 }
1150
1151 /*
1152 * Check segment limit, figuring how much we're allowed to access at this point.
1153 *
1154 * We will fault immediately if RIP is past the segment limit / in non-canonical
1155 * territory. If we do continue, there are one or more bytes to read before we
1156 * end up in trouble and we need to do that first before faulting.
1157 */
1158 RTGCPTR GCPtrFirst;
1159 uint32_t cbMaxRead;
1160 if (IEM_IS_64BIT_CODE(pVCpu))
1161 {
1162 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1163 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1164 { /* likely */ }
1165 else
1166 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1167 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1168 }
1169 else
1170 {
1171 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1172 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1173 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1174 { /* likely */ }
1175 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
1176 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1177 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1178 if (cbMaxRead != 0)
1179 { /* likely */ }
1180 else
1181 {
1182 /* Overflowed because address is 0 and limit is max. */
1183 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1184 cbMaxRead = X86_PAGE_SIZE;
1185 }
1186 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1187 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1188 if (cbMaxRead2 < cbMaxRead)
1189 cbMaxRead = cbMaxRead2;
1190 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1191 }
1192
1193 /*
1194 * Get the TLB entry for this piece of code.
1195 */
1196 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
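 /* Probe the even (non-global) slot first, then fall back to its odd (global) sibling. */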
1197 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
1198 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
1199 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
1200 {
1201 /* likely when executing lots of code, otherwise unlikely */
1202# ifdef IEM_WITH_TLB_STATISTICS
1203 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
1204# endif
1205 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1206
1207 /* Check TLB page table level access flags. */
1208 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1209 {
1210 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
1211 {
1212 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1213 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1214 }
1215 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1216 {
1217 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1218 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1219 }
1220 }
1221
1222 /* Look up the physical page info if necessary. */
1223 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1224 { /* not necessary */ }
1225 else
1226 {
1227 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1228 { /* likely */ }
1229 else
1230 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1231 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1232 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1233 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1234 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1235 }
1236 }
1237 else
1238 {
1239 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
1240
1241 /* This page table walking will set A bits as required by the access while performing the walk.
1242 ASSUMES these are set when the address is translated rather than on commit... */
1243 /** @todo testcase: check when A bits are actually set by the CPU for code. */
1244 PGMPTWALKFAST WalkFast;
1245 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
1246 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1247 &WalkFast);
1248 if (RT_SUCCESS(rc))
1249 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1250 else
1251 {
1252#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1253 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
1254 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
1255#endif
1256 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1257 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
1258 }
1259
1260 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1261 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
1262 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
1263 {
1264 pTlbe--;
1265 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
1266 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1267 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1268 }
1269 else
1270 {
1271 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
1272 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
1273 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1274 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1275 }
1276 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1277 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
1278 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
1279 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1280 pTlbe->GCPhys = GCPhysPg;
1281 pTlbe->pbMappingR3 = NULL;
1282 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1283 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1284 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1285
1286 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
1287 IEMTLBTRACE_LOAD( pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
1288 else
1289 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
1290
1291 /* Resolve the physical address. */
1292 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1293 { /* likely */ }
1294 else
1295 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1296 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1297 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1298 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1299 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1300 }
1301
1302# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1303 /*
1304 * Try do a direct read using the pbMappingR3 pointer.
1305 * Note! Do not recheck the physical TLB revision number here as we have the
1306 * wrong response to changes in the else case. If someone is updating
1307 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1308 * pretending we always won the race.
1309 */
1310 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1311 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1312 {
1313 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1314 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1315 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1316 {
1317 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1318 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1319 }
1320 else
1321 {
1322 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1323 if (cbInstr + (uint32_t)cbDst <= 15)
1324 {
1325 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1326 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1327 }
1328 else
1329 {
1330 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1331 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1332 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1333 }
1334 }
1335 if (cbDst <= cbMaxRead)
1336 {
1337 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1338 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1339
1340 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1341 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1342 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1343 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1344 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1345 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1346 else
1347 Assert(!pvDst);
1348 return;
1349 }
1350 pVCpu->iem.s.pbInstrBuf = NULL;
1351
1352 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1353 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1354 }
1355# else
1356# error "refactor as needed"
1357 /*
1358 * If there is no special read handling, we can read a bit more and
1359 * put it in the prefetch buffer.
1360 */
1361 if ( cbDst < cbMaxRead
1362 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1363 {
1364 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1365 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1366 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1367 { /* likely */ }
1368 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1369 {
1370 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1371 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1372 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1373 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1374 }
1375 else
1376 {
1377 Log((RT_SUCCESS(rcStrict)
1378 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1379 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1380 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1381 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1382 }
1383 }
1384# endif
1385 /*
1386 * Special read handling, so only read exactly what's needed.
1387 * This is a highly unlikely scenario.
1388 */
1389 else
1390 {
1391 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
1392
1393 /* Check instruction length. */
1394 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1395 if (RT_LIKELY(cbInstr + cbDst <= 15))
1396 { /* likely */ }
1397 else
1398 {
1399 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1400 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1401 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1402 }
1403
1404 /* Do the reading. */
1405 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1406 if (cbToRead > 0)
1407 {
1408 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1409 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1410 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1411 { /* likely */ }
1412 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1413 {
1414 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1415 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1416 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1417 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1418 }
1419 else
1420 {
1421 Log((RT_SUCCESS(rcStrict)
1422 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1423 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1424 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1425 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1426 }
1427 }
1428
1429 /* Update the state and probably return. */
1430 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1431 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1432 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1433
1434 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1435 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1436 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1437 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1438 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1439 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1440 pVCpu->iem.s.pbInstrBuf = NULL;
1441 if (cbToRead == cbDst)
1442 return;
1443 Assert(cbToRead == cbMaxRead);
1444 }
1445
1446 /*
1447 * More to read, loop.
1448 */
1449 cbDst -= cbMaxRead;
1450 pvDst = (uint8_t *)pvDst + cbMaxRead;
1451 }
1452# else /* !IN_RING3 */
1453 RT_NOREF(pvDst, cbDst);
1454 if (pvDst || cbDst)
1455 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1456# endif /* !IN_RING3 */
1457}
1458
1459#else /* !IEM_WITH_CODE_TLB */
1460
1461/**
1462 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1463 * exception if it fails.
1464 *
1465 * @returns Strict VBox status code.
1466 * @param pVCpu The cross context virtual CPU structure of the
1467 * calling thread.
1468 * @param cbMin The minimum number of bytes, relative to offOpcode,
1469 * that must be read.
1470 */
1471VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1472{
1473 /*
1474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1475 *
1476 * First translate CS:rIP to a physical address.
1477 */
1478 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1479 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1480 uint8_t const cbLeft = cbOpcode - offOpcode;
1481 Assert(cbLeft < cbMin);
1482 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1483
1484 uint32_t cbToTryRead;
1485 RTGCPTR GCPtrNext;
1486 if (IEM_IS_64BIT_CODE(pVCpu))
1487 {
1488 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1489 if (!IEM_IS_CANONICAL(GCPtrNext))
1490 return iemRaiseGeneralProtectionFault0(pVCpu);
1491 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1492 }
1493 else
1494 {
1495 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1496 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1497 GCPtrNext32 += cbOpcode;
1498 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1499 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1500 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1501 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1502 if (!cbToTryRead) /* overflowed */
1503 {
1504 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1505 cbToTryRead = UINT32_MAX;
1506 /** @todo check out wrapping around the code segment. */
1507 }
1508 if (cbToTryRead < cbMin - cbLeft)
1509 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1510 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1511
1512 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1513 if (cbToTryRead > cbLeftOnPage)
1514 cbToTryRead = cbLeftOnPage;
1515 }
1516
1517 /* Restrict to opcode buffer space.
1518
1519 We're making ASSUMPTIONS here based on work done previously in
1520 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1521 be fetched in case of an instruction crossing two pages. */
1522 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1523 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1524 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1525 { /* likely */ }
1526 else
1527 {
1528 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1529 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1530 return iemRaiseGeneralProtectionFault0(pVCpu);
1531 }
1532
1533 PGMPTWALKFAST WalkFast;
1534 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1535 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1536 &WalkFast);
1537 if (RT_SUCCESS(rc))
1538 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1539 else
1540 {
1541 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1542#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1543 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1544 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1545#endif
1546 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1547 }
1548 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1549 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1550
1551 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1552 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1553
1554 /*
1555 * Read the bytes at this address.
1556 *
1557 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1558 * and since PATM should only patch the start of an instruction there
1559 * should be no need to check again here.
1560 */
1561 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1562 {
1563 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1564 cbToTryRead, PGMACCESSORIGIN_IEM);
1565 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1566 { /* likely */ }
1567 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1568 {
1569 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1570 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1571 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1572 }
1573 else
1574 {
1575 Log((RT_SUCCESS(rcStrict)
1576 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1577 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1578 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1579 return rcStrict;
1580 }
1581 }
1582 else
1583 {
1584 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1585 if (RT_SUCCESS(rc))
1586 { /* likely */ }
1587 else
1588 {
1589 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1590 return rc;
1591 }
1592 }
1593 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1594 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1595
1596 return VINF_SUCCESS;
1597}
1598
1599#endif /* !IEM_WITH_CODE_TLB */
1600#ifndef IEM_WITH_SETJMP
1601
1602/**
1603 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1604 *
1605 * @returns Strict VBox status code.
1606 * @param pVCpu The cross context virtual CPU structure of the
1607 * calling thread.
1608 * @param pb Where to return the opcode byte.
1609 */
1610VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1611{
1612 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1613 if (rcStrict == VINF_SUCCESS)
1614 {
1615 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1616 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1617 pVCpu->iem.s.offOpcode = offOpcode + 1;
1618 }
1619 else
1620 *pb = 0;
1621 return rcStrict;
1622}
1623
1624#else /* IEM_WITH_SETJMP */
1625
1626/**
1627 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1628 *
1629 * @returns The opcode byte.
1630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1631 */
1632uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1633{
1634# ifdef IEM_WITH_CODE_TLB
1635 uint8_t u8;
1636 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1637 return u8;
1638# else
1639 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1640 if (rcStrict == VINF_SUCCESS)
1641 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1642 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1643# endif
1644}
1645
1646#endif /* IEM_WITH_SETJMP */
1647
1648#ifndef IEM_WITH_SETJMP
1649
1650/**
1651 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1652 *
1653 * @returns Strict VBox status code.
1654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1655 * @param pu16 Where to return the opcode word.
1656 */
1657VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1658{
1659 uint8_t u8;
1660 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1661 if (rcStrict == VINF_SUCCESS)
1662 *pu16 = (int8_t)u8;
1663 return rcStrict;
1664}
1665
1666
1667/**
1668 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1669 *
1670 * @returns Strict VBox status code.
1671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1672 * @param pu32 Where to return the opcode dword.
1673 */
1674VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1675{
1676 uint8_t u8;
1677 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1678 if (rcStrict == VINF_SUCCESS)
1679 *pu32 = (int8_t)u8;
1680 return rcStrict;
1681}
1682
1683
1684/**
1685 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1686 *
1687 * @returns Strict VBox status code.
1688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1689 * @param pu64 Where to return the opcode qword.
1690 */
1691VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1692{
1693 uint8_t u8;
1694 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1695 if (rcStrict == VINF_SUCCESS)
1696 *pu64 = (int8_t)u8;
1697 return rcStrict;
1698}
1699
1700#endif /* !IEM_WITH_SETJMP */
1701
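/* The S8Sx getters above rely on the (int8_t) cast to sign-extend an 8-bit opcode
   byte (typically a short displacement immediate) into the wider destination.
   A minimal standalone sketch of that idea, using only standard C and hypothetical
   helper names (not IEM/IPRT APIs): */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>
static uint32_t SketchSignExtendU8ToU32(uint8_t bImm)
{
    /* Casting to int8_t first makes the widening replicate bit 7, so 0xf0
       becomes 0xfffffff0 instead of 0x000000f0. */
    return (uint32_t)(int32_t)(int8_t)bImm;
}
static uint64_t SketchSignExtendU8ToU64(uint8_t bImm)
{
    return (uint64_t)(int64_t)(int8_t)bImm;
}
#endif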
1702
1703#ifndef IEM_WITH_SETJMP
1704
1705/**
1706 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1707 *
1708 * @returns Strict VBox status code.
1709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1710 * @param pu16 Where to return the opcode word.
1711 */
1712VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1713{
1714 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1715 if (rcStrict == VINF_SUCCESS)
1716 {
1717 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1718# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1719 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1720# else
1721 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1722# endif
1723 pVCpu->iem.s.offOpcode = offOpcode + 2;
1724 }
1725 else
1726 *pu16 = 0;
1727 return rcStrict;
1728}
1729
1730#else /* IEM_WITH_SETJMP */
1731
1732/**
1733 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1734 *
1735 * @returns The opcode word.
1736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1737 */
1738uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1739{
1740# ifdef IEM_WITH_CODE_TLB
1741 uint16_t u16;
1742 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1743 return u16;
1744# else
1745 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1746 if (rcStrict == VINF_SUCCESS)
1747 {
1748 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1749 pVCpu->iem.s.offOpcode += 2;
1750# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1751 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1752# else
1753 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1754# endif
1755 }
1756 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1757# endif
1758}
1759
1760#endif /* IEM_WITH_SETJMP */
1761
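/* With IEM_USE_UNALIGNED_DATA_ACCESS the getters above read the immediate straight
   out of the opcode buffer; otherwise they assemble it byte by byte in little-endian
   order, which is what RT_MAKE_U16 / RT_MAKE_U32_FROM_U8 expand to.  A minimal
   standalone sketch of the portable byte-assembly path, with hypothetical helper
   names and plain standard C: */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>
static uint16_t SketchReadU16LE(const uint8_t *pb)
{
    return (uint16_t)(pb[0] | ((uint16_t)pb[1] << 8));
}
static uint32_t SketchReadU32LE(const uint8_t *pb)
{
    return (uint32_t)pb[0]
         | ((uint32_t)pb[1] <<  8)
         | ((uint32_t)pb[2] << 16)
         | ((uint32_t)pb[3] << 24);
}
#endif
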
1762#ifndef IEM_WITH_SETJMP
1763
1764/**
1765 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1766 *
1767 * @returns Strict VBox status code.
1768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1769 * @param pu32 Where to return the opcode double word.
1770 */
1771VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1772{
1773 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1774 if (rcStrict == VINF_SUCCESS)
1775 {
1776 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1777 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1778 pVCpu->iem.s.offOpcode = offOpcode + 2;
1779 }
1780 else
1781 *pu32 = 0;
1782 return rcStrict;
1783}
1784
1785
1786/**
1787 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1788 *
1789 * @returns Strict VBox status code.
1790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1791 * @param pu64 Where to return the opcode quad word.
1792 */
1793VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1794{
1795 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1796 if (rcStrict == VINF_SUCCESS)
1797 {
1798 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1799 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1800 pVCpu->iem.s.offOpcode = offOpcode + 2;
1801 }
1802 else
1803 *pu64 = 0;
1804 return rcStrict;
1805}
1806
1807#endif /* !IEM_WITH_SETJMP */
1808
1809#ifndef IEM_WITH_SETJMP
1810
1811/**
1812 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1813 *
1814 * @returns Strict VBox status code.
1815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1816 * @param pu32 Where to return the opcode dword.
1817 */
1818VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1819{
1820 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1821 if (rcStrict == VINF_SUCCESS)
1822 {
1823 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1824# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1825 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1826# else
1827 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1828 pVCpu->iem.s.abOpcode[offOpcode + 1],
1829 pVCpu->iem.s.abOpcode[offOpcode + 2],
1830 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1831# endif
1832 pVCpu->iem.s.offOpcode = offOpcode + 4;
1833 }
1834 else
1835 *pu32 = 0;
1836 return rcStrict;
1837}
1838
1839#else /* IEM_WITH_SETJMP */
1840
1841/**
1842 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1843 *
1844 * @returns The opcode dword.
1845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1846 */
1847uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1848{
1849# ifdef IEM_WITH_CODE_TLB
1850 uint32_t u32;
1851 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1852 return u32;
1853# else
1854 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1855 if (rcStrict == VINF_SUCCESS)
1856 {
1857 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1858 pVCpu->iem.s.offOpcode = offOpcode + 4;
1859# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1860 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1861# else
1862 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1863 pVCpu->iem.s.abOpcode[offOpcode + 1],
1864 pVCpu->iem.s.abOpcode[offOpcode + 2],
1865 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1866# endif
1867 }
1868 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1869# endif
1870}
1871
1872#endif /* IEM_WITH_SETJMP */
1873
1874#ifndef IEM_WITH_SETJMP
1875
1876/**
1877 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1878 *
1879 * @returns Strict VBox status code.
1880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1881 * @param pu64 Where to return the opcode qword.
1882 */
1883VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1884{
1885 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1886 if (rcStrict == VINF_SUCCESS)
1887 {
1888 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1889 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1890 pVCpu->iem.s.abOpcode[offOpcode + 1],
1891 pVCpu->iem.s.abOpcode[offOpcode + 2],
1892 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1893 pVCpu->iem.s.offOpcode = offOpcode + 4;
1894 }
1895 else
1896 *pu64 = 0;
1897 return rcStrict;
1898}
1899
1900
1901/**
1902 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1903 *
1904 * @returns Strict VBox status code.
1905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1906 * @param pu64 Where to return the opcode qword.
1907 */
1908VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1909{
1910 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1911 if (rcStrict == VINF_SUCCESS)
1912 {
1913 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1914 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1915 pVCpu->iem.s.abOpcode[offOpcode + 1],
1916 pVCpu->iem.s.abOpcode[offOpcode + 2],
1917 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1918 pVCpu->iem.s.offOpcode = offOpcode + 4;
1919 }
1920 else
1921 *pu64 = 0;
1922 return rcStrict;
1923}
1924
1925#endif /* !IEM_WITH_SETJMP */
1926
1927#ifndef IEM_WITH_SETJMP
1928
1929/**
1930 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1931 *
1932 * @returns Strict VBox status code.
1933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1934 * @param pu64 Where to return the opcode qword.
1935 */
1936VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1937{
1938 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1939 if (rcStrict == VINF_SUCCESS)
1940 {
1941 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1942# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1943 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1944# else
1945 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1946 pVCpu->iem.s.abOpcode[offOpcode + 1],
1947 pVCpu->iem.s.abOpcode[offOpcode + 2],
1948 pVCpu->iem.s.abOpcode[offOpcode + 3],
1949 pVCpu->iem.s.abOpcode[offOpcode + 4],
1950 pVCpu->iem.s.abOpcode[offOpcode + 5],
1951 pVCpu->iem.s.abOpcode[offOpcode + 6],
1952 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1953# endif
1954 pVCpu->iem.s.offOpcode = offOpcode + 8;
1955 }
1956 else
1957 *pu64 = 0;
1958 return rcStrict;
1959}
1960
1961#else /* IEM_WITH_SETJMP */
1962
1963/**
1964 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1965 *
1966 * @returns The opcode qword.
1967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1968 */
1969uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1970{
1971# ifdef IEM_WITH_CODE_TLB
1972 uint64_t u64;
1973 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1974 return u64;
1975# else
1976 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1977 if (rcStrict == VINF_SUCCESS)
1978 {
1979 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1980 pVCpu->iem.s.offOpcode = offOpcode + 8;
1981# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1982 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1983# else
1984 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1985 pVCpu->iem.s.abOpcode[offOpcode + 1],
1986 pVCpu->iem.s.abOpcode[offOpcode + 2],
1987 pVCpu->iem.s.abOpcode[offOpcode + 3],
1988 pVCpu->iem.s.abOpcode[offOpcode + 4],
1989 pVCpu->iem.s.abOpcode[offOpcode + 5],
1990 pVCpu->iem.s.abOpcode[offOpcode + 6],
1991 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1992# endif
1993 }
1994 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1995# endif
1996}
1997
1998#endif /* IEM_WITH_SETJMP */
1999
2000
2001
2002/** @name Misc Worker Functions.
2003 * @{
2004 */
2005
2006/**
2007 * Gets the exception class for the specified exception vector.
2008 *
2009 * @returns The class of the specified exception.
2010 * @param uVector The exception vector.
2011 */
2012static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
2013{
2014 Assert(uVector <= X86_XCPT_LAST);
2015 switch (uVector)
2016 {
2017 case X86_XCPT_DE:
2018 case X86_XCPT_TS:
2019 case X86_XCPT_NP:
2020 case X86_XCPT_SS:
2021 case X86_XCPT_GP:
2022 case X86_XCPT_SX: /* AMD only */
2023 return IEMXCPTCLASS_CONTRIBUTORY;
2024
2025 case X86_XCPT_PF:
2026 case X86_XCPT_VE: /* Intel only */
2027 return IEMXCPTCLASS_PAGE_FAULT;
2028
2029 case X86_XCPT_DF:
2030 return IEMXCPTCLASS_DOUBLE_FAULT;
2031 }
2032 return IEMXCPTCLASS_BENIGN;
2033}
2034
2035
2036/**
2037 * Evaluates how to handle an exception caused during delivery of another event
2038 * (exception / interrupt).
2039 *
2040 * @returns How to handle the recursive exception.
2041 * @param pVCpu The cross context virtual CPU structure of the
2042 * calling thread.
2043 * @param fPrevFlags The flags of the previous event.
2044 * @param uPrevVector The vector of the previous event.
2045 * @param fCurFlags The flags of the current exception.
2046 * @param uCurVector The vector of the current exception.
2047 * @param pfXcptRaiseInfo Where to store additional information about the
2048 * exception condition. Optional.
2049 */
2050VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
2051 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
2052{
2053 /*
2054 * Only CPU exceptions can be raised while delivering other events; software interrupt
2055 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
2056 */
2057 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
2058 Assert(pVCpu); RT_NOREF(pVCpu);
2059 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
2060
2061 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
2062 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
2063 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2064 {
2065 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
2066 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
2067 {
2068 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
2069 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
2070 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
2071 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
2072 {
2073 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2074 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
2075 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
2076 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
2077 uCurVector, pVCpu->cpum.GstCtx.cr2));
2078 }
2079 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2080 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
2081 {
2082 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2083 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
2084 }
2085 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
2086 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2087 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
2088 {
2089 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
2090 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
2091 }
2092 }
2093 else
2094 {
2095 if (uPrevVector == X86_XCPT_NMI)
2096 {
2097 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
2098 if (uCurVector == X86_XCPT_PF)
2099 {
2100 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
2101 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
2102 }
2103 }
2104 else if ( uPrevVector == X86_XCPT_AC
2105 && uCurVector == X86_XCPT_AC)
2106 {
2107 enmRaise = IEMXCPTRAISE_CPU_HANG;
2108 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
2109 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
2110 }
2111 }
2112 }
2113 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
2114 {
2115 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
2116 if (uCurVector == X86_XCPT_PF)
2117 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
2118 }
2119 else
2120 {
2121 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
2122 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
2123 }
2124
2125 if (pfXcptRaiseInfo)
2126 *pfXcptRaiseInfo = fRaiseInfo;
2127 return enmRaise;
2128}
2129
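/* A compact restatement of the rules IEMEvaluateRecursiveXcpt implements (cf. the
   Intel SDM "Conditions for Generating a Double Fault" table); rows are the event
   being delivered, columns the exception raised during its delivery:

                        benign        contributory   page fault
     benign             handle 2nd    handle 2nd     handle 2nd
     contributory       handle 2nd    #DF            handle 2nd
     page fault         handle 2nd    #DF            #DF

   And if delivering a #DF itself raises a contributory exception or a page fault,
   the CPU escalates to a triple fault (shutdown), as coded above. */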
2130
2131/**
2132 * Enters the CPU shutdown state initiated by a triple fault or other
2133 * unrecoverable conditions.
2134 *
2135 * @returns Strict VBox status code.
2136 * @param pVCpu The cross context virtual CPU structure of the
2137 * calling thread.
2138 */
2139static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
2140{
2141 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2142 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
2143
2144 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
2145 {
2146 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
2147 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2148 }
2149
2150 RT_NOREF(pVCpu);
2151 return VINF_EM_TRIPLE_FAULT;
2152}
2153
2154
2155/**
2156 * Validates a new SS segment.
2157 *
2158 * @returns VBox strict status code.
2159 * @param pVCpu The cross context virtual CPU structure of the
2160 * calling thread.
2161 * @param NewSS The new SS selector.
2162 * @param uCpl The CPL to load the stack for.
2163 * @param pDesc Where to return the descriptor.
2164 */
2165static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
2166{
2167 /* Null selectors are not allowed (we're not called for dispatching
2168 interrupts with SS=0 in long mode). */
2169 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2170 {
2171 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2172 return iemRaiseTaskSwitchFault0(pVCpu);
2173 }
2174
2175 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2176 if ((NewSS & X86_SEL_RPL) != uCpl)
2177 {
2178 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2179 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2180 }
2181
2182 /*
2183 * Read the descriptor.
2184 */
2185 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2186 if (rcStrict != VINF_SUCCESS)
2187 return rcStrict;
2188
2189 /*
2190 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2191 */
2192 if (!pDesc->Legacy.Gen.u1DescType)
2193 {
2194 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2195 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2196 }
2197
2198 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2199 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2200 {
2201 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2202 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2203 }
2204 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2205 {
2206 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2207 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2208 }
2209
2210 /* Is it there? */
2211 /** @todo testcase: Is this checked before the canonical / limit check below? */
2212 if (!pDesc->Legacy.Gen.u1Present)
2213 {
2214 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2215 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2216 }
2217
2218 return VINF_SUCCESS;
2219}
2220
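/* The descriptor checks above boil down to a few bits of the legacy type/access
   byte (bit 7 = present, bits 6:5 = DPL, bit 4 = code/data, bit 3 = code,
   bit 1 = writable).  A minimal standalone sketch of the "usable stack segment
   type" test on such a raw byte, with hypothetical names rather than the X86DESC
   bitfields used above (the DPL/RPL checks are separate): */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>
static int SketchIsUsableStackSegType(uint8_t bAccess)
{
    return (bAccess & 0x80)  /* present */
        && (bAccess & 0x10)  /* code/data, not a system descriptor */
        && !(bAccess & 0x08) /* data, not code */
        && (bAccess & 0x02); /* writable */
}
#endif
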
2221/** @} */
2222
2223
2224/** @name Raising Exceptions.
2225 *
2226 * @{
2227 */
2228
2229
2230/**
2231 * Loads the specified stack far pointer from the TSS.
2232 *
2233 * @returns VBox strict status code.
2234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2235 * @param uCpl The CPL to load the stack for.
2236 * @param pSelSS Where to return the new stack segment.
2237 * @param puEsp Where to return the new stack pointer.
2238 */
2239static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
2240{
2241 VBOXSTRICTRC rcStrict;
2242 Assert(uCpl < 4);
2243
2244 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2245 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
2246 {
2247 /*
2248 * 16-bit TSS (X86TSS16).
2249 */
2250 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2251 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2252 {
2253 uint32_t off = uCpl * 4 + 2;
2254 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2255 {
2256 /** @todo check actual access pattern here. */
2257 uint32_t u32Tmp = 0; /* gcc maybe... */
2258 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2259 if (rcStrict == VINF_SUCCESS)
2260 {
2261 *puEsp = RT_LOWORD(u32Tmp);
2262 *pSelSS = RT_HIWORD(u32Tmp);
2263 return VINF_SUCCESS;
2264 }
2265 }
2266 else
2267 {
2268 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2269 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2270 }
2271 break;
2272 }
2273
2274 /*
2275 * 32-bit TSS (X86TSS32).
2276 */
2277 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2278 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2279 {
2280 uint32_t off = uCpl * 8 + 4;
2281 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2282 {
2283/** @todo check actual access pattern here. */
2284 uint64_t u64Tmp;
2285 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2286 if (rcStrict == VINF_SUCCESS)
2287 {
2288 *puEsp = u64Tmp & UINT32_MAX;
2289 *pSelSS = (RTSEL)(u64Tmp >> 32);
2290 return VINF_SUCCESS;
2291 }
2292 }
2293 else
2294 {
2295 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2296 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2297 }
2298 break;
2299 }
2300
2301 default:
2302 AssertFailed();
2303 rcStrict = VERR_IEM_IPE_4;
2304 break;
2305 }
2306
2307 *puEsp = 0; /* make gcc happy */
2308 *pSelSS = 0; /* make gcc happy */
2309 return rcStrict;
2310}
2311
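/* The offsets used above follow directly from the architectural TSS layouts: the
   16-bit TSS stores the SP0:SS0..SP2:SS2 pairs as 2+2 byte entries starting at
   offset 2, while the 32-bit TSS stores ESP0/SS0..ESP2/SS2 as 4+4 byte entries
   starting at offset 4.  A standalone sketch of the slot calculation (hypothetical
   helper, not an IEM API): */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>
static uint32_t SketchCalcTssStackSlotOffset(int fIs386Tss, uint8_t uCpl)
{
    return fIs386Tss
         ? (uint32_t)uCpl * 8 + 4   /* {ESPn, SSn} at 4, 12, 20 */
         : (uint32_t)uCpl * 4 + 2;  /* {SPn, SSn}  at 2,  6, 10 */
}
#endif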
2312
2313/**
2314 * Loads the specified stack pointer from the 64-bit TSS.
2315 *
2316 * @returns VBox strict status code.
2317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2318 * @param uCpl The CPL to load the stack for.
2319 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2320 * @param puRsp Where to return the new stack pointer.
2321 */
2322static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2323{
2324 Assert(uCpl < 4);
2325 Assert(uIst < 8);
2326 *puRsp = 0; /* make gcc happy */
2327
2328 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2329 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2330
2331 uint32_t off;
2332 if (uIst)
2333 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2334 else
2335 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2336 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2337 {
2338 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2339 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2340 }
2341
2342 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2343}
2344
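/* The 64-bit TSS uses the same scheme with 8-byte slots: architecturally RSP0
   lives at offset 0x04 and IST1 at offset 0x24, which is what the RT_UOFFSETOF()
   expressions above resolve to.  Standalone sketch (hypothetical helper): */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>
static uint32_t SketchCalcTss64StackSlotOffset(uint8_t uCpl, uint8_t uIst)
{
    return uIst
         ? (uint32_t)(uIst - 1) * 8 + 0x24  /* IST1..IST7 */
         : (uint32_t)uCpl * 8 + 0x04;       /* RSP0..RSP2 */
}
#endif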
2345
2346/**
2347 * Adjust the CPU state according to the exception being raised.
2348 *
2349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2350 * @param u8Vector The exception that has been raised.
2351 */
2352DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2353{
2354 switch (u8Vector)
2355 {
2356 case X86_XCPT_DB:
2357 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2358 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2359 break;
2360 /** @todo Read the AMD and Intel exception reference... */
2361 }
2362}
2363
2364
2365/**
2366 * Implements exceptions and interrupts for real mode.
2367 *
2368 * @returns VBox strict status code.
2369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2370 * @param cbInstr The number of bytes to offset rIP by in the return
2371 * address.
2372 * @param u8Vector The interrupt / exception vector number.
2373 * @param fFlags The flags.
2374 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2375 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2376 */
2377static VBOXSTRICTRC
2378iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2379 uint8_t cbInstr,
2380 uint8_t u8Vector,
2381 uint32_t fFlags,
2382 uint16_t uErr,
2383 uint64_t uCr2) RT_NOEXCEPT
2384{
2385 NOREF(uErr); NOREF(uCr2);
2386 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2387
2388 /*
2389 * Read the IDT entry.
2390 */
2391 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2392 {
2393 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2394 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2395 }
2396 RTFAR16 Idte;
2397 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2398 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2399 {
2400 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2401 return rcStrict;
2402 }
2403
2404#ifdef LOG_ENABLED
2405 /* If software interrupt, try to decode it if logging is enabled and such. */
2406 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2407 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2408 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2409#endif
2410
2411 /*
2412 * Push the stack frame.
2413 */
2414 uint8_t bUnmapInfo;
2415 uint16_t *pu16Frame;
2416 uint64_t uNewRsp;
2417 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2418 if (rcStrict != VINF_SUCCESS)
2419 return rcStrict;
2420
2421 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2422#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2423 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2424 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2425 fEfl |= UINT16_C(0xf000);
2426#endif
2427 pu16Frame[2] = (uint16_t)fEfl;
2428 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2429 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2430 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2431 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2432 return rcStrict;
2433
2434 /*
2435 * Load the vector address into cs:ip and make exception specific state
2436 * adjustments.
2437 */
2438 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2439 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2440 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2441 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2442 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2443 pVCpu->cpum.GstCtx.rip = Idte.off;
2444 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2445 IEMMISC_SET_EFL(pVCpu, fEfl);
2446
2447 /** @todo do we actually do this in real mode? */
2448 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2449 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2450
2451 /*
2452 * Deal with debug events that follow the exception and clear inhibit flags.
2453 */
2454 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2455 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2456 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2457 else
2458 {
2459 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
2460 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2461 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2462 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2463 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2464 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2465 return iemRaiseDebugException(pVCpu);
2466 }
2467
2468 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2469 so best leave them alone in case we're in a weird kind of real mode... */
2470
2471 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2472}
2473
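/* In real mode each IVT entry is a plain 4 byte far pointer (16-bit offset followed
   by 16-bit segment) located at IDTR.base + 4 * vector, and the frame pushed above
   is FLAGS, CS, IP with IP ending up at the lowest address (the new SP).  A
   standalone sketch of the entry lookup arithmetic, with hypothetical names: */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>
typedef struct SKETCHIVTENTRY { uint16_t off, sel; } SKETCHIVTENTRY;
static uint32_t SketchCalcRealModeIvtEntryAddr(uint32_t uIdtBase, uint8_t bVector)
{
    return uIdtBase + (uint32_t)bVector * sizeof(SKETCHIVTENTRY);
}
#endif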
2474
2475/**
2476 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2477 *
2478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2479 * @param pSReg Pointer to the segment register.
2480 */
2481DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2482{
2483 pSReg->Sel = 0;
2484 pSReg->ValidSel = 0;
2485 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2486 {
2487 /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes */
2488 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2489 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2490 }
2491 else
2492 {
2493 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2494 /** @todo check this on AMD-V */
2495 pSReg->u64Base = 0;
2496 pSReg->u32Limit = 0;
2497 }
2498}
2499
2500
2501/**
2502 * Loads a segment selector during a task switch in V8086 mode.
2503 *
2504 * @param pSReg Pointer to the segment register.
2505 * @param uSel The selector value to load.
2506 */
2507DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2508{
2509 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2510 pSReg->Sel = uSel;
2511 pSReg->ValidSel = uSel;
2512 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2513 pSReg->u64Base = uSel << 4;
2514 pSReg->u32Limit = 0xffff;
2515 pSReg->Attr.u = 0xf3;
2516}
2517
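/* In virtual-8086 mode the hidden segment state is fully determined by the 16-bit
   selector: base = selector << 4, limit = 0xffff and attributes 0xf3 (present,
   DPL 3, accessed read/write data), so a linear address is simply
   (selector << 4) + offset.  Standalone sketch (hypothetical helper): */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>
static uint32_t SketchCalcV86LinearAddr(uint16_t uSel, uint16_t off)
{
    return ((uint32_t)uSel << 4) + off;
}
#endif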
2518
2519/**
2520 * Loads a segment selector during a task switch in protected mode.
2521 *
2522 * In this task switch scenario, we would throw \#TS exceptions rather than
2523 * \#GPs.
2524 *
2525 * @returns VBox strict status code.
2526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2527 * @param pSReg Pointer to the segment register.
2528 * @param uSel The new selector value.
2529 *
2530 * @remarks This does _not_ handle CS or SS.
2531 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2532 */
2533static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2534{
2535 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2536
2537 /* Null data selector. */
2538 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2539 {
2540 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2541 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2542 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2543 return VINF_SUCCESS;
2544 }
2545
2546 /* Fetch the descriptor. */
2547 IEMSELDESC Desc;
2548 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2549 if (rcStrict != VINF_SUCCESS)
2550 {
2551 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2552 VBOXSTRICTRC_VAL(rcStrict)));
2553 return rcStrict;
2554 }
2555
2556 /* Must be a data segment or readable code segment. */
2557 if ( !Desc.Legacy.Gen.u1DescType
2558 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2559 {
2560 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2561 Desc.Legacy.Gen.u4Type));
2562 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2563 }
2564
2565 /* Check privileges for data segments and non-conforming code segments. */
2566 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2567 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2568 {
2569 /* The RPL and the new CPL must be less than or equal to the DPL. */
2570 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2571 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2572 {
2573 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2574 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2575 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2576 }
2577 }
2578
2579 /* Is it there? */
2580 if (!Desc.Legacy.Gen.u1Present)
2581 {
2582 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2583 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2584 }
2585
2586 /* The base and limit. */
2587 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2588 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2589
2590 /*
2591 * Ok, everything checked out fine. Now set the accessed bit before
2592 * committing the result into the registers.
2593 */
2594 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2595 {
2596 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2597 if (rcStrict != VINF_SUCCESS)
2598 return rcStrict;
2599 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2600 }
2601
2602 /* Commit */
2603 pSReg->Sel = uSel;
2604 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2605 pSReg->u32Limit = cbLimit;
2606 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2607 pSReg->ValidSel = uSel;
2608 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2609 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2610 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2611
2612 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2613 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2614 return VINF_SUCCESS;
2615}
2616
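/* X86DESC_BASE() and X86DESC_LIMIT_G() used above reassemble the base/limit fields
   that a legacy descriptor keeps scattered across several bitfields, with the
   limit scaled by the granularity bit.  A standalone sketch of the limit part on
   hypothetical raw fields (not the real X86DESC accessors): */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>
static uint32_t SketchCalcSegLimit(uint32_t uLimitLow16, uint32_t uLimitHigh4, int fGran4K)
{
    uint32_t cbLimit = (uLimitHigh4 << 16) | uLimitLow16;
    if (fGran4K)
        cbLimit = (cbLimit << 12) | 0xfff; /* limit counted in 4K units */
    return cbLimit;
}
#endif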
2617
2618/**
2619 * Performs a task switch.
2620 *
2621 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2622 * caller is responsible for performing the necessary checks (like DPL, TSS
2623 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2624 * reference for JMP, CALL, IRET.
2625 *
2626 * If the task switch is due to a software interrupt or hardware exception,
2627 * the caller is responsible for validating the TSS selector and descriptor. See
2628 * Intel Instruction reference for INT n.
2629 *
2630 * @returns VBox strict status code.
2631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2632 * @param enmTaskSwitch The cause of the task switch.
2633 * @param uNextEip The EIP effective after the task switch.
2634 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2635 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2636 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2637 * @param SelTss The TSS selector of the new task.
2638 * @param pNewDescTss Pointer to the new TSS descriptor.
2639 */
2640VBOXSTRICTRC
2641iemTaskSwitch(PVMCPUCC pVCpu,
2642 IEMTASKSWITCH enmTaskSwitch,
2643 uint32_t uNextEip,
2644 uint32_t fFlags,
2645 uint16_t uErr,
2646 uint64_t uCr2,
2647 RTSEL SelTss,
2648 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2649{
2650 Assert(!IEM_IS_REAL_MODE(pVCpu));
2651 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2652 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2653
2654 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2655 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2656 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2657 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2658 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2659
2660 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2661 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2662
2663 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2664 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2665
2666 /* Update CR2 in case it's a page-fault. */
2667 /** @todo This should probably be done much earlier in IEM/PGM. See
2668 * @bugref{5653#c49}. */
2669 if (fFlags & IEM_XCPT_FLAGS_CR2)
2670 pVCpu->cpum.GstCtx.cr2 = uCr2;
2671
2672 /*
2673 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2674 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2675 */
2676 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2677 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2678 if (uNewTssLimit < uNewTssLimitMin)
2679 {
2680 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2681 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2682 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2683 }
2684
2685 /*
2686 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2687 * The new TSS must have been read and validated (DPL, limits etc.) before a
2688 * task-switch VM-exit commences.
2689 *
2690 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2691 */
2692 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2693 {
2694 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2695 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2696 }
2697
2698 /*
2699 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2700 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2701 */
2702 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2703 {
2704 uint64_t const uExitInfo1 = SelTss;
2705 uint64_t uExitInfo2 = uErr;
2706 switch (enmTaskSwitch)
2707 {
2708 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2709 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2710 default: break;
2711 }
2712 if (fFlags & IEM_XCPT_FLAGS_ERR)
2713 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2714 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2715 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2716
2717 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2718 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2719 RT_NOREF2(uExitInfo1, uExitInfo2);
2720 }
2721
2722 /*
2723 * Check the current TSS limit. The last write to the current TSS during the
2724 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2725 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2726 *
2727 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2728 * end up with smaller than "legal" TSS limits.
2729 */
2730 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2731 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2732 if (uCurTssLimit < uCurTssLimitMin)
2733 {
2734 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2735 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2736 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2737 }
2738
2739 /*
2740 * Verify that the new TSS can be accessed and map it. Map only the required contents
2741 * and not the entire TSS.
2742 */
2743 uint8_t bUnmapInfoNewTss;
2744 void *pvNewTss;
2745 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2746 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2747 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2748 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2749 * not perform correct translation if this happens. See Intel spec. 7.2.1
2750 * "Task-State Segment". */
2751 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2752/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2753 * Consider wrapping the remainder into a function for simpler cleanup. */
2754 if (rcStrict != VINF_SUCCESS)
2755 {
2756 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2757 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2758 return rcStrict;
2759 }
2760
2761 /*
2762 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2763 */
2764 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2765 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2766 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2767 {
2768 uint8_t bUnmapInfoDescCurTss;
2769 PX86DESC pDescCurTss;
2770 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2771 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2772 if (rcStrict != VINF_SUCCESS)
2773 {
2774 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2775 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2776 return rcStrict;
2777 }
2778
2779 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2780 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2781 if (rcStrict != VINF_SUCCESS)
2782 {
2783 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2784 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2785 return rcStrict;
2786 }
2787
2788 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2789 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2790 {
2791 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2792 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2793 fEFlags &= ~X86_EFL_NT;
2794 }
2795 }
2796
2797 /*
2798 * Save the CPU state into the current TSS.
2799 */
2800 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2801 if (GCPtrNewTss == GCPtrCurTss)
2802 {
2803 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2804 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2805 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2806 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2807 pVCpu->cpum.GstCtx.ldtr.Sel));
2808 }
2809 if (fIsNewTss386)
2810 {
2811 /*
2812 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2813 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2814 */
2815 uint8_t bUnmapInfoCurTss32;
2816 void *pvCurTss32;
2817 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2818 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2819 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2820 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2821 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2822 if (rcStrict != VINF_SUCCESS)
2823 {
2824 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2825 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2826 return rcStrict;
2827 }
2828
2829 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..cbCurTss). */
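 /* Only the [offCurTss, offCurTss + cbCurTss) window was mapped above, so the pointer
    is biased backwards by offCurTss to keep the X86TSS32 member offsets usable;
    members outside the mapped window must not be dereferenced. */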
2830 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2831 pCurTss32->eip = uNextEip;
2832 pCurTss32->eflags = fEFlags;
2833 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2834 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2835 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2836 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2837 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2838 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2839 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2840 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2841 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2842 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2843 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2844 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2845 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2846 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2847
2848 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2849 if (rcStrict != VINF_SUCCESS)
2850 {
2851 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2852 VBOXSTRICTRC_VAL(rcStrict)));
2853 return rcStrict;
2854 }
2855 }
2856 else
2857 {
2858 /*
2859 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2860 */
2861 uint8_t bUnmapInfoCurTss16;
2862 void *pvCurTss16;
2863 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2864 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2865 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2866 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2867 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2868 if (rcStrict != VINF_SUCCESS)
2869 {
2870 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2871 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2872 return rcStrict;
2873 }
2874
2875 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..cbCurTss). */
2876 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2877 pCurTss16->ip = uNextEip;
2878 pCurTss16->flags = (uint16_t)fEFlags;
2879 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2880 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2881 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2882 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2883 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2884 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2885 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2886 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2887 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2888 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2889 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2890 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2891
2892 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2893 if (rcStrict != VINF_SUCCESS)
2894 {
2895 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2896 VBOXSTRICTRC_VAL(rcStrict)));
2897 return rcStrict;
2898 }
2899 }
2900
2901 /*
2902 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2903 */
2904 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2905 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2906 {
2907 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2908 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2909 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2910 }
2911
2912 /*
2913 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2914 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2915 */
2916 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2917 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2918 bool fNewDebugTrap;
2919 if (fIsNewTss386)
2920 {
2921 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2922 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2923 uNewEip = pNewTss32->eip;
2924 uNewEflags = pNewTss32->eflags;
2925 uNewEax = pNewTss32->eax;
2926 uNewEcx = pNewTss32->ecx;
2927 uNewEdx = pNewTss32->edx;
2928 uNewEbx = pNewTss32->ebx;
2929 uNewEsp = pNewTss32->esp;
2930 uNewEbp = pNewTss32->ebp;
2931 uNewEsi = pNewTss32->esi;
2932 uNewEdi = pNewTss32->edi;
2933 uNewES = pNewTss32->es;
2934 uNewCS = pNewTss32->cs;
2935 uNewSS = pNewTss32->ss;
2936 uNewDS = pNewTss32->ds;
2937 uNewFS = pNewTss32->fs;
2938 uNewGS = pNewTss32->gs;
2939 uNewLdt = pNewTss32->selLdt;
2940 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2941 }
2942 else
2943 {
2944 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2945 uNewCr3 = 0;
2946 uNewEip = pNewTss16->ip;
2947 uNewEflags = pNewTss16->flags;
2948 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2949 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2950 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2951 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2952 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2953 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2954 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2955 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2956 uNewES = pNewTss16->es;
2957 uNewCS = pNewTss16->cs;
2958 uNewSS = pNewTss16->ss;
2959 uNewDS = pNewTss16->ds;
2960 uNewFS = 0;
2961 uNewGS = 0;
2962 uNewLdt = pNewTss16->selLdt;
2963 fNewDebugTrap = false;
2964 }
2965
2966 if (GCPtrNewTss == GCPtrCurTss)
2967 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2968 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2969
2970 /*
2971 * We're done accessing the new TSS.
2972 */
2973 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2974 if (rcStrict != VINF_SUCCESS)
2975 {
2976 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2977 return rcStrict;
2978 }
2979
2980 /*
2981 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2982 */
2983 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2984 {
2985 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2986 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2987 if (rcStrict != VINF_SUCCESS)
2988 {
2989 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2990 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2991 return rcStrict;
2992 }
2993
2994 /* Check that the descriptor indicates the new TSS is available (not busy). */
2995 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2996 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2997 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2998
2999 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3000 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
3001 if (rcStrict != VINF_SUCCESS)
3002 {
3003 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3004 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3005 return rcStrict;
3006 }
3007 }
3008
3009 /*
3010     * From this point on, we're technically in the new task.  Exceptions raised from here on are deferred
3011     * until the task switch completes, but they are delivered before any instruction executes in the new task.
3012 */
3013 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
3014 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
3015 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3016 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
3017 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
3018 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
3019 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3020
3021 /* Set the busy bit in TR. */
3022 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3023
3024 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3025 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3026 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3027 {
3028 uNewEflags |= X86_EFL_NT;
3029 }
3030
3031 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3032 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
3033 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3034
3035 pVCpu->cpum.GstCtx.eip = uNewEip;
3036 pVCpu->cpum.GstCtx.eax = uNewEax;
3037 pVCpu->cpum.GstCtx.ecx = uNewEcx;
3038 pVCpu->cpum.GstCtx.edx = uNewEdx;
3039 pVCpu->cpum.GstCtx.ebx = uNewEbx;
3040 pVCpu->cpum.GstCtx.esp = uNewEsp;
3041 pVCpu->cpum.GstCtx.ebp = uNewEbp;
3042 pVCpu->cpum.GstCtx.esi = uNewEsi;
3043 pVCpu->cpum.GstCtx.edi = uNewEdi;
3044
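    /* Keep only the live EFLAGS bits from the TSS image and force the reserved always-one bit (bit 1). */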
3045 uNewEflags &= X86_EFL_LIVE_MASK;
3046 uNewEflags |= X86_EFL_RA1_MASK;
3047 IEMMISC_SET_EFL(pVCpu, uNewEflags);
3048
3049 /*
3050 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3051     * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3,
3052     * because the hidden part data originates from the guest LDT/GDT which is accessed through paging.
3053 */
3054 pVCpu->cpum.GstCtx.es.Sel = uNewES;
3055 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
3056
3057 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3058 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
3059
3060 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3061 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
3062
3063 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
3064 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
3065
3066 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
3067 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
3068
3069 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
3070 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
3071 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3072
3073 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
3074 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3075 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
3076 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3077
3078 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3079 {
3080 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
3081 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
3082 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
3083 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
3084 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
3085 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
3086 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3087 }
3088
3089 /*
3090 * Switch CR3 for the new task.
3091 */
3092 if ( fIsNewTss386
3093 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
3094 {
3095 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3096 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3097 AssertRCSuccessReturn(rc, rc);
3098
3099 /* Inform PGM. */
3100 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
3101 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
3102 AssertRCReturn(rc, rc);
3103 /* ignore informational status codes */
3104
3105 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3106 }
3107
3108 /*
3109 * Switch LDTR for the new task.
3110 */
3111 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3112 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
3113 else
3114 {
3115 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3116
3117 IEMSELDESC DescNewLdt;
3118 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3119 if (rcStrict != VINF_SUCCESS)
3120 {
3121 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3122 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3123 return rcStrict;
3124 }
3125 if ( !DescNewLdt.Legacy.Gen.u1Present
3126 || DescNewLdt.Legacy.Gen.u1DescType
3127 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3128 {
3129 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3130 uNewLdt, DescNewLdt.Legacy.u));
3131 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3132 }
3133
3134 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
3135 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3136 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3137 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3138 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3139 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3140 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3141 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
3142 }
3143
3144 IEMSELDESC DescSS;
3145 if (IEM_IS_V86_MODE(pVCpu))
3146 {
3147 IEM_SET_CPL(pVCpu, 3);
3148 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
3149 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
3150 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
3151 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
3152 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
3153 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
3154
3155 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
3156 DescSS.Legacy.u = 0;
3157 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
3158 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
3159 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
3160 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
3161 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
3162 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3163 DescSS.Legacy.Gen.u2Dpl = 3;
3164 }
3165 else
3166 {
3167 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
3168
3169 /*
3170 * Load the stack segment for the new task.
3171 */
3172 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3173 {
3174 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3175 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3176 }
3177
3178 /* Fetch the descriptor. */
3179 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3180 if (rcStrict != VINF_SUCCESS)
3181 {
3182 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3183 VBOXSTRICTRC_VAL(rcStrict)));
3184 return rcStrict;
3185 }
3186
3187 /* SS must be a data segment and writable. */
3188 if ( !DescSS.Legacy.Gen.u1DescType
3189 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3190 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3191 {
3192 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3193 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3194 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3195 }
3196
3197 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3198 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3199 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3200 {
3201 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3202 uNewCpl));
3203 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3204 }
3205
3206 /* Is it there? */
3207 if (!DescSS.Legacy.Gen.u1Present)
3208 {
3209 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3210 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3211 }
3212
3213 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3214 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3215
3216 /* Set the accessed bit before committing the result into SS. */
3217 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3218 {
3219 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3220 if (rcStrict != VINF_SUCCESS)
3221 return rcStrict;
3222 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3223 }
3224
3225 /* Commit SS. */
3226 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3227 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3228 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3229 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
3230 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
3231 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3232 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
3233
3234 /* CPL has changed, update IEM before loading rest of segments. */
3235 IEM_SET_CPL(pVCpu, uNewCpl);
3236
3237 /*
3238 * Load the data segments for the new task.
3239 */
3240 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
3241 if (rcStrict != VINF_SUCCESS)
3242 return rcStrict;
3243 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
3244 if (rcStrict != VINF_SUCCESS)
3245 return rcStrict;
3246 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
3247 if (rcStrict != VINF_SUCCESS)
3248 return rcStrict;
3249 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
3250 if (rcStrict != VINF_SUCCESS)
3251 return rcStrict;
3252
3253 /*
3254 * Load the code segment for the new task.
3255 */
3256 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3257 {
3258 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3259 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3260 }
3261
3262 /* Fetch the descriptor. */
3263 IEMSELDESC DescCS;
3264 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
3265 if (rcStrict != VINF_SUCCESS)
3266 {
3267 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3268 return rcStrict;
3269 }
3270
3271 /* CS must be a code segment. */
3272 if ( !DescCS.Legacy.Gen.u1DescType
3273 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3274 {
3275 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3276 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3277 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3278 }
3279
3280 /* For conforming CS, DPL must be less than or equal to the RPL. */
3281 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3282 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3283 {
3284                 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3285 DescCS.Legacy.Gen.u2Dpl));
3286 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3287 }
3288
3289 /* For non-conforming CS, DPL must match RPL. */
3290 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3291 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3292 {
3293                 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3294 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3295 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3296 }
3297
3298 /* Is it there? */
3299 if (!DescCS.Legacy.Gen.u1Present)
3300 {
3301 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3302 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3303 }
3304
3305 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3306 u64Base = X86DESC_BASE(&DescCS.Legacy);
3307
3308 /* Set the accessed bit before committing the result into CS. */
3309 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3310 {
3311 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3312 if (rcStrict != VINF_SUCCESS)
3313 return rcStrict;
3314 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3315 }
3316
3317 /* Commit CS. */
3318 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3319 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3320 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3321 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3322 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3323 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3324 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3325 }
3326
3327 /* Make sure the CPU mode is correct. */
3328 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3329 if (fExecNew != pVCpu->iem.s.fExec)
3330 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3331 pVCpu->iem.s.fExec = fExecNew;
3332
3333 /** @todo Debug trap. */
3334 if (fIsNewTss386 && fNewDebugTrap)
3335 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3336
3337 /*
3338 * Construct the error code masks based on what caused this task switch.
3339 * See Intel Instruction reference for INT.
3340 */
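    /* uExt becomes bit 0 (the EXT bit) of the error code used for any fault raised while finishing the
       switch (the #SS/#GP cases below): 1 for hardware interrupts/exceptions and ICEBP, 0 for software INT
       and for JMP/CALL/IRET initiated switches. */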
3341 uint16_t uExt;
3342 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3343 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3344 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3345 uExt = 1;
3346 else
3347 uExt = 0;
3348
3349 /*
3350     * Push any error code onto the new stack.
3351 */
3352 if (fFlags & IEM_XCPT_FLAGS_ERR)
3353 {
3354 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3355 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3356 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3357
3358 /* Check that there is sufficient space on the stack. */
3359 /** @todo Factor out segment limit checking for normal/expand down segments
3360 * into a separate function. */
3361 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3362 {
3363 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3364 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3365 {
3366 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3367 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3368 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3369 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3370 }
3371 }
3372 else
3373 {
3374 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3375 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3376 {
3377 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3378 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3379 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3380 }
3381 }
3382
3383
3384 if (fIsNewTss386)
3385 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3386 else
3387 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3388 if (rcStrict != VINF_SUCCESS)
3389 {
3390 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3391 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3392 return rcStrict;
3393 }
3394 }
3395
3396 /* Check the new EIP against the new CS limit. */
3397 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3398 {
3399         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3400 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3401 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3402 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3403 }
3404
3405 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3406 pVCpu->cpum.GstCtx.ss.Sel));
3407 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3408}
3409
3410
3411/**
3412 * Implements exceptions and interrupts for protected mode.
3413 *
3414 * @returns VBox strict status code.
3415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3416 * @param cbInstr The number of bytes to offset rIP by in the return
3417 * address.
3418 * @param u8Vector The interrupt / exception vector number.
3419 * @param fFlags The flags.
3420 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3421 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3422 */
3423static VBOXSTRICTRC
3424iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3425 uint8_t cbInstr,
3426 uint8_t u8Vector,
3427 uint32_t fFlags,
3428 uint16_t uErr,
3429 uint64_t uCr2) RT_NOEXCEPT
3430{
3431 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3432
3433 /*
3434 * Read the IDT entry.
3435 */
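    /* Each protected-mode IDT entry is an 8 byte gate descriptor, hence the '* 8' scaling. An out-of-bounds
       vector yields #GP with an IDT-format error code: bit 1 (IDT) set and the vector in bits 3..15, so for
       example vector 0x0d would give error code 0x6a. */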
3436 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3437 {
3438 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3439 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3440 }
3441 X86DESC Idte;
3442 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3443 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3444 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3445 {
3446 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3447 return rcStrict;
3448 }
3449 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3450 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3451 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3452 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3453
3454 /*
3455 * Check the descriptor type, DPL and such.
3456 * ASSUMES this is done in the same order as described for call-gate calls.
3457 */
3458 if (Idte.Gate.u1DescType)
3459 {
3460 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3461 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3462 }
3463 bool fTaskGate = false;
3464 uint8_t f32BitGate = true;
3465 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3466 switch (Idte.Gate.u4Type)
3467 {
3468 case X86_SEL_TYPE_SYS_UNDEFINED:
3469 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3470 case X86_SEL_TYPE_SYS_LDT:
3471 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3472 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3473 case X86_SEL_TYPE_SYS_UNDEFINED2:
3474 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3475 case X86_SEL_TYPE_SYS_UNDEFINED3:
3476 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3477 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3478 case X86_SEL_TYPE_SYS_UNDEFINED4:
3479 {
3480 /** @todo check what actually happens when the type is wrong...
3481 * esp. call gates. */
3482 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3483 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3484 }
3485
3486 case X86_SEL_TYPE_SYS_286_INT_GATE:
3487 f32BitGate = false;
3488 RT_FALL_THRU();
3489 case X86_SEL_TYPE_SYS_386_INT_GATE:
3490 fEflToClear |= X86_EFL_IF;
3491 break;
3492
3493 case X86_SEL_TYPE_SYS_TASK_GATE:
3494 fTaskGate = true;
3495#ifndef IEM_IMPLEMENTS_TASKSWITCH
3496 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3497#endif
3498 break;
3499
3500 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3501 f32BitGate = false;
3502 break;
3503 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3504 break;
3505
3506 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3507 }
3508
3509 /* Check DPL against CPL if applicable. */
3510 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3511 {
3512 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3513 {
3514 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3515 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3516 }
3517 }
3518
3519 /* Is it there? */
3520 if (!Idte.Gate.u1Present)
3521 {
3522 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3523 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3524 }
3525
3526 /* Is it a task-gate? */
3527 if (fTaskGate)
3528 {
3529 /*
3530 * Construct the error code masks based on what caused this task switch.
3531 * See Intel Instruction reference for INT.
3532 */
3533 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3534 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3535 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3536 RTSEL SelTss = Idte.Gate.u16Sel;
3537
3538 /*
3539 * Fetch the TSS descriptor in the GDT.
3540 */
3541 IEMSELDESC DescTSS;
3542 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3543 if (rcStrict != VINF_SUCCESS)
3544 {
3545 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3546 VBOXSTRICTRC_VAL(rcStrict)));
3547 return rcStrict;
3548 }
3549
3550 /* The TSS descriptor must be a system segment and be available (not busy). */
3551 if ( DescTSS.Legacy.Gen.u1DescType
3552 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3553 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3554 {
3555 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3556 u8Vector, SelTss, DescTSS.Legacy.au64));
3557 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3558 }
3559
3560 /* The TSS must be present. */
3561 if (!DescTSS.Legacy.Gen.u1Present)
3562 {
3563 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3564 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3565 }
3566
3567 /* Do the actual task switch. */
3568 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3569 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3570 fFlags, uErr, uCr2, SelTss, &DescTSS);
3571 }
3572
3573 /* A null CS is bad. */
3574 RTSEL NewCS = Idte.Gate.u16Sel;
3575 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3576 {
3577 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3578 return iemRaiseGeneralProtectionFault0(pVCpu);
3579 }
3580
3581 /* Fetch the descriptor for the new CS. */
3582 IEMSELDESC DescCS;
3583 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3584 if (rcStrict != VINF_SUCCESS)
3585 {
3586 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3587 return rcStrict;
3588 }
3589
3590 /* Must be a code segment. */
3591 if (!DescCS.Legacy.Gen.u1DescType)
3592 {
3593 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3594 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3595 }
3596 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3597 {
3598 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3599 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3600 }
3601
3602 /* Don't allow lowering the privilege level. */
3603 /** @todo Does the lowering of privileges apply to software interrupts
3604     *        only? This has a bearing on the more-privileged or
3605 * same-privilege stack behavior further down. A testcase would
3606 * be nice. */
3607 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3608 {
3609 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3610 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3611 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3612 }
3613
3614 /* Make sure the selector is present. */
3615 if (!DescCS.Legacy.Gen.u1Present)
3616 {
3617 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3618 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3619 }
3620
3621#ifdef LOG_ENABLED
3622     /* If software interrupt, try to decode it if logging is enabled and such. */
3623 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3624 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3625 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3626#endif
3627
3628 /* Check the new EIP against the new CS limit. */
3629 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3630 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3631 ? Idte.Gate.u16OffsetLow
3632 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3633 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3634 if (uNewEip > cbLimitCS)
3635 {
3636 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3637 u8Vector, uNewEip, cbLimitCS, NewCS));
3638 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3639 }
3640 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3641
3642 /* Calc the flag image to push. */
3643 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3644 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3645 fEfl &= ~X86_EFL_RF;
3646 else
3647 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3648
3649 /* From V8086 mode only go to CPL 0. */
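    /* A conforming handler code segment executes at the caller's CPL, a non-conforming one at its own DPL;
       the latter is what triggers the stack switch further down. */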
3650 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3651 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3652 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3653 {
3654 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3655 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3656 }
3657
3658 /*
3659 * If the privilege level changes, we need to get a new stack from the TSS.
3660     * This in turn means validating the new SS and ESP...
3661 */
3662 if (uNewCpl != IEM_GET_CPL(pVCpu))
3663 {
3664 RTSEL NewSS;
3665 uint32_t uNewEsp;
3666 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3667 if (rcStrict != VINF_SUCCESS)
3668 return rcStrict;
3669
3670 IEMSELDESC DescSS;
3671 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3672 if (rcStrict != VINF_SUCCESS)
3673 return rcStrict;
3674 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3675 if (!DescSS.Legacy.Gen.u1DefBig)
3676 {
3677 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3678 uNewEsp = (uint16_t)uNewEsp;
3679 }
3680
3681 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3682
3683 /* Check that there is sufficient space for the stack frame. */
3684 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3685 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3686 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3687 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
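        /* Frame layout (low to high): [error code,] EIP, CS, EFLAGS, old ESP, old SS, and when interrupting
           V8086 code also the old ES, DS, FS and GS - i.e. 5/6 or 9/10 slots.  Each slot is 2 bytes for a
           286 gate and 4 bytes for a 386 gate, hence the shift by f32BitGate. */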
3688
3689 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3690 {
3691 if ( uNewEsp - 1 > cbLimitSS
3692 || uNewEsp < cbStackFrame)
3693 {
3694 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3695 u8Vector, NewSS, uNewEsp, cbStackFrame));
3696 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3697 }
3698 }
3699 else
3700 {
3701 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3702 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3703 {
3704 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3705 u8Vector, NewSS, uNewEsp, cbStackFrame));
3706 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3707 }
3708 }
3709
3710 /*
3711 * Start making changes.
3712 */
3713
3714 /* Set the new CPL so that stack accesses use it. */
3715 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3716 IEM_SET_CPL(pVCpu, uNewCpl);
3717
3718 /* Create the stack frame. */
3719 uint8_t bUnmapInfoStackFrame;
3720 RTPTRUNION uStackFrame;
3721 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3722 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3723 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3724 if (rcStrict != VINF_SUCCESS)
3725 return rcStrict;
3726 if (f32BitGate)
3727 {
3728 if (fFlags & IEM_XCPT_FLAGS_ERR)
3729 *uStackFrame.pu32++ = uErr;
3730 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3731 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3732 uStackFrame.pu32[2] = fEfl;
3733 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3734 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3735 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3736 if (fEfl & X86_EFL_VM)
3737 {
3738 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3739 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3740 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3741 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3742 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3743 }
3744 }
3745 else
3746 {
3747 if (fFlags & IEM_XCPT_FLAGS_ERR)
3748 *uStackFrame.pu16++ = uErr;
3749 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3750 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3751 uStackFrame.pu16[2] = fEfl;
3752 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3753 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3754 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3755 if (fEfl & X86_EFL_VM)
3756 {
3757 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3758 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3759 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3760 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3761 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3762 }
3763 }
3764 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3765 if (rcStrict != VINF_SUCCESS)
3766 return rcStrict;
3767
3768 /* Mark the selectors 'accessed' (hope this is the correct time). */
3769         /** @todo testcase: exactly _when_ are the accessed bits set - before or
3770 * after pushing the stack frame? (Write protect the gdt + stack to
3771 * find out.) */
3772 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3773 {
3774 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3775 if (rcStrict != VINF_SUCCESS)
3776 return rcStrict;
3777 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3778 }
3779
3780 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3781 {
3782 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3783 if (rcStrict != VINF_SUCCESS)
3784 return rcStrict;
3785 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3786 }
3787
3788 /*
3789         * Start committing the register changes (joins with the DPL=CPL branch).
3790 */
3791 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3792 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3793 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3794 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3795 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3796 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3797 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3798 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3799 * SP is loaded).
3800 * Need to check the other combinations too:
3801 * - 16-bit TSS, 32-bit handler
3802 * - 32-bit TSS, 16-bit handler */
3803 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3804 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3805 else
3806 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3807
3808 if (fEfl & X86_EFL_VM)
3809 {
3810 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3811 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3812 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3813 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3814 }
3815 }
3816 /*
3817 * Same privilege, no stack change and smaller stack frame.
3818 */
3819 else
3820 {
3821 uint64_t uNewRsp;
3822 uint8_t bUnmapInfoStackFrame;
3823 RTPTRUNION uStackFrame;
3824 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
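        /* No stack switch here, so only [the error code,] EIP, CS and EFLAGS are pushed; SS:ESP of the
           interrupted code is not saved. */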
3825 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3826 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3827 if (rcStrict != VINF_SUCCESS)
3828 return rcStrict;
3829
3830 if (f32BitGate)
3831 {
3832 if (fFlags & IEM_XCPT_FLAGS_ERR)
3833 *uStackFrame.pu32++ = uErr;
3834 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3835 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3836 uStackFrame.pu32[2] = fEfl;
3837 }
3838 else
3839 {
3840 if (fFlags & IEM_XCPT_FLAGS_ERR)
3841 *uStackFrame.pu16++ = uErr;
3842 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3843 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3844 uStackFrame.pu16[2] = fEfl;
3845 }
3846 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3847 if (rcStrict != VINF_SUCCESS)
3848 return rcStrict;
3849
3850 /* Mark the CS selector as 'accessed'. */
3851 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3852 {
3853 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3854 if (rcStrict != VINF_SUCCESS)
3855 return rcStrict;
3856 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3857 }
3858
3859 /*
3860 * Start committing the register changes (joins with the other branch).
3861 */
3862 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3863 }
3864
3865 /* ... register committing continues. */
3866 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3867 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3868 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3869 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3870 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3871 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3872
3873 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3874 fEfl &= ~fEflToClear;
3875 IEMMISC_SET_EFL(pVCpu, fEfl);
3876
3877 if (fFlags & IEM_XCPT_FLAGS_CR2)
3878 pVCpu->cpum.GstCtx.cr2 = uCr2;
3879
3880 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3881 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3882
3883 /* Make sure the execution flags are correct. */
3884 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3885 if (fExecNew != pVCpu->iem.s.fExec)
3886 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3887 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3888 pVCpu->iem.s.fExec = fExecNew;
3889 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3890
3891 /*
3892     * Deal with debug events that follow the exception and clear inhibit flags.
3893 */
3894 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3895 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
3896 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3897 else
3898 {
3899 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
3900 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
3901 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3902 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
3903 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
3904 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3905 return iemRaiseDebugException(pVCpu);
3906 }
3907
3908 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3909}
3910
3911
3912/**
3913 * Implements exceptions and interrupts for long mode.
3914 *
3915 * @returns VBox strict status code.
3916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3917 * @param cbInstr The number of bytes to offset rIP by in the return
3918 * address.
3919 * @param u8Vector The interrupt / exception vector number.
3920 * @param fFlags The flags.
3921 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3922 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3923 */
3924static VBOXSTRICTRC
3925iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3926 uint8_t cbInstr,
3927 uint8_t u8Vector,
3928 uint32_t fFlags,
3929 uint16_t uErr,
3930 uint64_t uCr2) RT_NOEXCEPT
3931{
3932 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3933
3934 /*
3935 * Read the IDT entry.
3936 */
3937 uint16_t offIdt = (uint16_t)u8Vector << 4;
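    /* Long mode IDT entries are 16 byte gate descriptors, hence the shift by 4; the entry is read below in
       two 8 byte halves. */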
3938 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3939 {
3940 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3941 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3942 }
3943 X86DESC64 Idte;
3944#ifdef _MSC_VER /* Shut up silly compiler warning. */
3945 Idte.au64[0] = 0;
3946 Idte.au64[1] = 0;
3947#endif
3948 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3949 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3950 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3951 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3952 {
3953 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3954 return rcStrict;
3955 }
3956 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3957 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3958 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3959
3960 /*
3961 * Check the descriptor type, DPL and such.
3962 * ASSUMES this is done in the same order as described for call-gate calls.
3963 */
3964 if (Idte.Gate.u1DescType)
3965 {
3966 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3967 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3968 }
3969 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3970 switch (Idte.Gate.u4Type)
3971 {
3972 case AMD64_SEL_TYPE_SYS_INT_GATE:
3973 fEflToClear |= X86_EFL_IF;
3974 break;
3975 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3976 break;
3977
3978 default:
3979 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3980 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3981 }
3982
3983 /* Check DPL against CPL if applicable. */
3984 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3985 {
3986 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3987 {
3988 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3989 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3990 }
3991 }
3992
3993 /* Is it there? */
3994 if (!Idte.Gate.u1Present)
3995 {
3996 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3997 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3998 }
3999
4000 /* A null CS is bad. */
4001 RTSEL NewCS = Idte.Gate.u16Sel;
4002 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4003 {
4004 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4005 return iemRaiseGeneralProtectionFault0(pVCpu);
4006 }
4007
4008 /* Fetch the descriptor for the new CS. */
4009 IEMSELDESC DescCS;
4010 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4011 if (rcStrict != VINF_SUCCESS)
4012 {
4013 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4014 return rcStrict;
4015 }
4016
4017 /* Must be a 64-bit code segment. */
4018 if (!DescCS.Long.Gen.u1DescType)
4019 {
4020 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4021 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4022 }
4023 if ( !DescCS.Long.Gen.u1Long
4024 || DescCS.Long.Gen.u1DefBig
4025 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4026 {
4027 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4028 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4029 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4030 }
4031
4032 /* Don't allow lowering the privilege level. For non-conforming CS
4033 selectors, the CS.DPL sets the privilege level the trap/interrupt
4034 handler runs at. For conforming CS selectors, the CPL remains
4035 unchanged, but the CS.DPL must be <= CPL. */
4036 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4037 * when CPU in Ring-0. Result \#GP? */
4038 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
4039 {
4040 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4041 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
4042 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4043 }
4044
4045
4046 /* Make sure the selector is present. */
4047 if (!DescCS.Legacy.Gen.u1Present)
4048 {
4049 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4050 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4051 }
4052
4053 /* Check that the new RIP is canonical. */
4054 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4055 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4056 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4057 if (!IEM_IS_CANONICAL(uNewRip))
4058 {
4059 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4060 return iemRaiseGeneralProtectionFault0(pVCpu);
4061 }
4062
4063 /*
4064 * If the privilege level changes or if the IST isn't zero, we need to get
4065 * a new stack from the TSS.
4066 */
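    /* A non-zero IST index (1..7) selects one of the interrupt stack table pointers in the 64-bit TSS and
       forces a stack switch even when the CPL does not change. */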
4067 uint64_t uNewRsp;
4068 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4069 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
4070 if ( uNewCpl != IEM_GET_CPL(pVCpu)
4071 || Idte.Gate.u3IST != 0)
4072 {
4073 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4074 if (rcStrict != VINF_SUCCESS)
4075 return rcStrict;
4076 }
4077 else
4078 uNewRsp = pVCpu->cpum.GstCtx.rsp;
4079 uNewRsp &= ~(uint64_t)0xf;
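    /* In 64-bit mode the CPU aligns the new stack pointer down to a 16 byte boundary before pushing the frame. */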
4080
4081 /*
4082 * Calc the flag image to push.
4083 */
4084 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4085 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4086 fEfl &= ~X86_EFL_RF;
4087 else
4088 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4089
4090 /*
4091 * Start making changes.
4092 */
4093 /* Set the new CPL so that stack accesses use it. */
4094 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
4095 IEM_SET_CPL(pVCpu, uNewCpl);
4096/** @todo Setting CPL this early seems wrong as it would affect any errors we
4097 * raise accessing the stack and (?) GDT/LDT... */
4098
4099 /* Create the stack frame. */
4100 uint8_t bUnmapInfoStackFrame;
4101 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
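    /* Five 64-bit slots - RIP, CS, RFLAGS, old RSP and old SS - plus one more when an error code is pushed;
       in long mode SS:RSP is saved even when the privilege level does not change. */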
4102 RTPTRUNION uStackFrame;
4103 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
4104 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
4105 if (rcStrict != VINF_SUCCESS)
4106 return rcStrict;
4107
4108 if (fFlags & IEM_XCPT_FLAGS_ERR)
4109 *uStackFrame.pu64++ = uErr;
4110 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
4111 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4112 uStackFrame.pu64[2] = fEfl;
4113 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
4114 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
4115 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4116 if (rcStrict != VINF_SUCCESS)
4117 return rcStrict;
4118
4119     /* Mark the CS selector 'accessed' (hope this is the correct time). */
4120     /** @todo testcase: exactly _when_ are the accessed bits set - before or
4121 * after pushing the stack frame? (Write protect the gdt + stack to
4122 * find out.) */
4123 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4124 {
4125 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4126 if (rcStrict != VINF_SUCCESS)
4127 return rcStrict;
4128 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4129 }
4130
4131 /*
4132     * Start committing the register changes.
4133 */
4134     /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
4135 * hidden registers when interrupting 32-bit or 16-bit code! */
4136 if (uNewCpl != uOldCpl)
4137 {
4138 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
4139 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
4140 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4141 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4142 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4143 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4144 }
4145 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
4146 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4147 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4148 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4149 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4150 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4151 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4152 pVCpu->cpum.GstCtx.rip = uNewRip;
4153
4154 fEfl &= ~fEflToClear;
4155 IEMMISC_SET_EFL(pVCpu, fEfl);
4156
4157 if (fFlags & IEM_XCPT_FLAGS_CR2)
4158 pVCpu->cpum.GstCtx.cr2 = uCr2;
4159
4160 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4161 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4162
4163 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
4164
4165 /*
4166     * Deal with debug events that follow the exception and clear inhibit flags.
4167 */
4168 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4169 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4170 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4171 else
4172 {
4173 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
4174 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4175 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4176 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4177 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4178 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4179 return iemRaiseDebugException(pVCpu);
4180 }
4181
4182 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4183}
4184
4185
4186/**
4187 * Implements exceptions and interrupts.
4188 *
4189 * All exceptions and interrupts go through this function!
4190 *
4191 * @returns VBox strict status code.
4192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4193 * @param cbInstr The number of bytes to offset rIP by in the return
4194 * address.
4195 * @param u8Vector The interrupt / exception vector number.
4196 * @param fFlags The flags.
4197 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4198 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4199 */
4200VBOXSTRICTRC
4201iemRaiseXcptOrInt(PVMCPUCC pVCpu,
4202 uint8_t cbInstr,
4203 uint8_t u8Vector,
4204 uint32_t fFlags,
4205 uint16_t uErr,
4206 uint64_t uCr2) RT_NOEXCEPT
4207{
4208 /*
4209 * Get all the state that we might need here.
4210 */
4211 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4212 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4213
4214#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4215 /*
4216 * Flush prefetch buffer
4217 */
4218 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4219#endif
4220
4221 /*
4222     * Perform the V8086 IOPL check: a software INT with IOPL < 3 is upgraded to #GP(0) here, without nesting the fault.
4223 */
4224 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
4225 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
4226 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
4227 | IEM_XCPT_FLAGS_BP_INSTR
4228 | IEM_XCPT_FLAGS_ICEBP_INSTR
4229 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4230 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
4231 {
4232 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4233 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4234 u8Vector = X86_XCPT_GP;
4235 uErr = 0;
4236 }
4237
4238 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
4239#ifdef DBGFTRACE_ENABLED
4240 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4241 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4242 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
4243#endif
4244
4245 /*
4246 * Check if DBGF wants to intercept the exception.
4247 */
4248 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
4249 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
4250 { /* likely */ }
4251 else
4252 {
4253 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
4254 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
4255 if (rcStrict != VINF_SUCCESS)
4256 return rcStrict;
4257 }
4258
4259 /*
4260 * Evaluate whether NMI blocking should be in effect.
4261 * Normally, NMI blocking is in effect whenever we inject an NMI.
4262 */
4263 bool fBlockNmi = u8Vector == X86_XCPT_NMI
4264 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
4265
4266#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4267 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4268 {
4269 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
4270 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4271 return rcStrict0;
4272
4273 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
4274 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
4275 {
4276 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
4277 fBlockNmi = false;
4278 }
4279 }
4280#endif
4281
4282#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4283 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
4284 {
4285 /*
4286 * If the event is being injected as part of VMRUN, it isn't subject to event
4287 * intercepts in the nested-guest. However, secondary exceptions that occur
4288 * during injection of any event -are- subject to exception intercepts.
4289 *
4290 * See AMD spec. 15.20 "Event Injection".
4291 */
4292 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
4293 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
4294 else
4295 {
4296 /*
4297 * Check and handle if the event being raised is intercepted.
4298 */
4299 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4300 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
4301 return rcStrict0;
4302 }
4303 }
4304#endif
4305
4306 /*
4307 * Set NMI blocking if necessary.
4308 */
4309 if (fBlockNmi)
4310 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
4311
4312 /*
4313 * Do recursion accounting.
4314 */
4315 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4316 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4317 if (pVCpu->iem.s.cXcptRecursions == 0)
4318 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4319 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
4320 else
4321 {
4322 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4323 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
4324 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4325
4326 if (pVCpu->iem.s.cXcptRecursions >= 4)
4327 {
4328#ifdef DEBUG_bird
4329 AssertFailed();
4330#endif
4331 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4332 }
4333
4334 /*
4335 * Evaluate the sequence of recurring events.
4336 */
4337 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4338 NULL /* pXcptRaiseInfo */);
4339 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4340 { /* likely */ }
4341 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4342 {
4343 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4344 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4345 u8Vector = X86_XCPT_DF;
4346 uErr = 0;
4347#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4348 /* VMX nested-guest #DF intercept needs to be checked here. */
4349 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4350 {
4351 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4352 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4353 return rcStrict0;
4354 }
4355#endif
4356 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4357 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4358 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4359 }
4360 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4361 {
4362 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4363 return iemInitiateCpuShutdown(pVCpu);
4364 }
4365 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4366 {
4367 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4368 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4369 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4370 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4371 return VERR_EM_GUEST_CPU_HANG;
4372 }
4373 else
4374 {
4375 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4376 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4377 return VERR_IEM_IPE_9;
4378 }
4379
4380 /*
4381     * The 'EXT' bit is set when an exception occurs during delivery of an external
4382     * event (such as an interrupt or an earlier exception)[1]. A privileged software
4383     * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4384     * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
4385 *
4386 * [1] - Intel spec. 6.13 "Error Code"
4387 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4388 * [3] - Intel Instruction reference for INT n.
4389 */
4390 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4391 && (fFlags & IEM_XCPT_FLAGS_ERR)
4392 && u8Vector != X86_XCPT_PF
4393 && u8Vector != X86_XCPT_DF)
4394 {
4395 uErr |= X86_TRAP_ERR_EXTERNAL;
4396 }
4397 }
4398
4399 pVCpu->iem.s.cXcptRecursions++;
4400 pVCpu->iem.s.uCurXcpt = u8Vector;
4401 pVCpu->iem.s.fCurXcpt = fFlags;
4402 pVCpu->iem.s.uCurXcptErr = uErr;
4403 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4404
4405 /*
4406 * Extensive logging.
4407 */
4408#if defined(LOG_ENABLED) && defined(IN_RING3)
4409 if (LogIs3Enabled())
4410 {
4411 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4412 char szRegs[4096];
4413 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4414 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4415 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4416 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4417 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4418 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4419 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4420 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4421 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4422 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4423 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4424 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4425 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4426 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4427 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4428 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4429 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4430 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4431 " efer=%016VR{efer}\n"
4432 " pat=%016VR{pat}\n"
4433 " sf_mask=%016VR{sf_mask}\n"
4434 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4435 " lstar=%016VR{lstar}\n"
4436 " star=%016VR{star} cstar=%016VR{cstar}\n"
4437 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4438 );
4439
4440 char szInstr[256];
4441 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4442 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4443 szInstr, sizeof(szInstr), NULL);
4444 Log3(("%s%s\n", szRegs, szInstr));
4445 }
4446#endif /* LOG_ENABLED */
4447
4448 /*
4449 * Stats.
4450 */
4451 uint64_t const uTimestamp = ASMReadTSC();
4452 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4453 {
4454 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4455 EMHistoryAddExit(pVCpu,
4456 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4457 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4458 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4459 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4460 IEMTLBTRACE_IRQ(pVCpu, u8Vector, fFlags, pVCpu->cpum.GstCtx.rflags.uBoth);
4461 }
4462 else
4463 {
4464 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4465 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4466 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4467 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4468 if (fFlags & IEM_XCPT_FLAGS_ERR)
4469 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4470 if (fFlags & IEM_XCPT_FLAGS_CR2)
4471 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4472 IEMTLBTRACE_XCPT(pVCpu, u8Vector, fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0, fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0, fFlags);
4473 }
4474
4475 /*
4476 * Hack alert! Convert incoming debug events to silent ones on Intel.
4477 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4478 */
4479 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4480 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4481 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4482 { /* ignore */ }
4483 else
4484 {
4485 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
4486 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
4487 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4488 | CPUMCTX_DBG_HIT_DRX_SILENT;
4489 }
4490
4491 /*
4492 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4493 * to ensure that a stale TLB or paging cache entry will only cause one
4494 * spurious #PF.
4495 */
4496 if ( u8Vector == X86_XCPT_PF
4497 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4498 IEMTlbInvalidatePage(pVCpu, uCr2);
4499
4500 /*
4501 * Call the mode specific worker function.
4502 */
4503 VBOXSTRICTRC rcStrict;
4504 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4505 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4506 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4507 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4508 else
4509 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4510
4511 /* Flush the prefetch buffer. */
4512 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4513
4514 /*
4515 * Unwind.
4516 */
4517 pVCpu->iem.s.cXcptRecursions--;
4518 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4519 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4520 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4521 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4522 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4523 return rcStrict;
4524}
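/*
 * Illustrative sketch (not part of the build): how a selector error code with
 * the EXT bit discussed above could be composed.  This is a stand-alone
 * example using the plain Intel error-code bit layout rather than IEM code;
 * the helper name is made up for illustration.
 */
#if 0 /* example only */
static uint16_t exampleMakeSelectorErrorCode(uint16_t uSel, bool fIdt, bool fExternalEvent)
{
    uint16_t uErr = uSel & (uint16_t)~0x3; /* keep the selector index and TI bit, drop the RPL */
    if (fIdt)
        uErr |= UINT16_C(0x2);             /* bit 1: the referenced descriptor is in the IDT */
    if (fExternalEvent)
        uErr |= UINT16_C(0x1);             /* bit 0: EXT - raised while delivering an external event */
    return uErr;
}
#endif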
4525
4526#ifdef IEM_WITH_SETJMP
4527/**
4528 * See iemRaiseXcptOrInt. Will not return.
4529 */
4530DECL_NO_RETURN(void)
4531iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4532 uint8_t cbInstr,
4533 uint8_t u8Vector,
4534 uint32_t fFlags,
4535 uint16_t uErr,
4536 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4537{
4538 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4539 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4540}
4541#endif
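/*
 * Illustrative sketch (not part of the build): the generic setjmp/longjmp
 * pattern the *Jmp raise helpers depend on.  IEM's real IEM_DO_LONGJMP and
 * setjmp machinery is defined elsewhere; this stand-alone analogue merely
 * shows why such helpers never return to their caller.
 */
#if 0 /* example only */
# include <setjmp.h>

static jmp_buf g_ExampleJmpBuf;

static void exampleRaiseJmp(int rcStrict)
{
    /* Only called with non-zero status codes in this sketch. */
    longjmp(g_ExampleJmpBuf, rcStrict);     /* unwinds straight back to the dispatcher */
}

static int exampleDispatcher(void)
{
    int rc = setjmp(g_ExampleJmpBuf);
    if (rc == 0)
    {
        /* ... decode and execute; on a fault some inner helper calls exampleRaiseJmp() ... */
        return 0;
    }
    return rc;                              /* the status that was passed to longjmp */
}
#endif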
4542
4543
4544/** \#DE - 00. */
4545VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4546{
4547 if (GCMIsInterceptingXcptDE(pVCpu))
4548 {
4549 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4550 if (rc == VINF_SUCCESS)
4551 {
4552 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4553 return VINF_IEM_RAISED_XCPT; /* must return a non-zero status here to cause an instruction restart */
4554 }
4555 }
4556 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4557}
4558
4559
4560#ifdef IEM_WITH_SETJMP
4561/** \#DE - 00. */
4562DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4563{
4564 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4565}
4566#endif
4567
4568
4569/** \#DB - 01.
4570 * @note This automatically clears DR7.GD. */
4571VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4572{
4573 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4574 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4575 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4576}
4577
4578
4579/** \#BR - 05. */
4580VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4581{
4582 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4583}
4584
4585
4586/** \#UD - 06. */
4587VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4588{
4589 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4590}
4591
4592
4593#ifdef IEM_WITH_SETJMP
4594/** \#UD - 06. */
4595DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4596{
4597 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4598}
4599#endif
4600
4601
4602/** \#NM - 07. */
4603VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4604{
4605 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4606}
4607
4608
4609#ifdef IEM_WITH_SETJMP
4610/** \#NM - 07. */
4611DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4612{
4613 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4614}
4615#endif
4616
4617
4618/** \#TS(err) - 0a. */
4619VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4620{
4621 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4622}
4623
4624
4625/** \#TS(tr) - 0a. */
4626VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4627{
4628 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4629 pVCpu->cpum.GstCtx.tr.Sel, 0);
4630}
4631
4632
4633/** \#TS(0) - 0a. */
4634VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4635{
4636 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4637 0, 0);
4638}
4639
4640
4641/** \#TS(sel) - 0a. */
4642VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4643{
4644 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4645 uSel & X86_SEL_MASK_OFF_RPL, 0);
4646}
4647
4648
4649/** \#NP(err) - 0b. */
4650VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4651{
4652 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4653}
4654
4655
4656/** \#NP(sel) - 0b. */
4657VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4658{
4659 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4660 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4661 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4662 uSel & ~X86_SEL_RPL, 0);
4663}
4664
4665
4666/** \#SS(seg) - 0c. */
4667VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4668{
4669 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4670 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4671 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4672 uSel & ~X86_SEL_RPL, 0);
4673}
4674
4675
4676/** \#SS(err) - 0c. */
4677VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4678{
4679 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4680 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4681 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4682}
4683
4684
4685/** \#GP(n) - 0d. */
4686VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4687{
4688 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4689 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4690}
4691
4692
4693/** \#GP(0) - 0d. */
4694VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4695{
4696 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4697 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4698}
4699
4700#ifdef IEM_WITH_SETJMP
4701/** \#GP(0) - 0d. */
4702DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4703{
4704 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4705 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4706}
4707#endif
4708
4709
4710/** \#GP(sel) - 0d. */
4711VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4712{
4713 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4714 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4715 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4716 Sel & ~X86_SEL_RPL, 0);
4717}
4718
4719
4720/** \#GP(0) - 0d. */
4721VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4722{
4723 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4724 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4725}
4726
4727
4728/** \#GP(sel) - 0d. */
4729VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4730{
4731 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4732 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4733 NOREF(iSegReg); NOREF(fAccess);
4734 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4735 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4736}
4737
4738#ifdef IEM_WITH_SETJMP
4739/** \#GP(sel) - 0d, longjmp. */
4740DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4741{
4742 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4743 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4744 NOREF(iSegReg); NOREF(fAccess);
4745 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4746 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4747}
4748#endif
4749
4750/** \#GP(sel) - 0d. */
4751VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4752{
4753 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4754 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4755 NOREF(Sel);
4756 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4757}
4758
4759#ifdef IEM_WITH_SETJMP
4760/** \#GP(sel) - 0d, longjmp. */
4761DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4762{
4763 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4764 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4765 NOREF(Sel);
4766 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4767}
4768#endif
4769
4770
4771/** \#GP(sel) - 0d. */
4772VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4773{
4774 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4775 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4776 NOREF(iSegReg); NOREF(fAccess);
4777 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4778}
4779
4780#ifdef IEM_WITH_SETJMP
4781/** \#GP(sel) - 0d, longjmp. */
4782DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4783{
4784 NOREF(iSegReg); NOREF(fAccess);
4785 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4786}
4787#endif
4788
4789
4790/** \#PF(n) - 0e. */
4791VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4792{
4793 uint16_t uErr;
4794 switch (rc)
4795 {
4796 case VERR_PAGE_NOT_PRESENT:
4797 case VERR_PAGE_TABLE_NOT_PRESENT:
4798 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4799 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4800 uErr = 0;
4801 break;
4802
4803 case VERR_RESERVED_PAGE_TABLE_BITS:
4804 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
4805 break;
4806
4807 default:
4808 AssertMsgFailed(("%Rrc\n", rc));
4809 RT_FALL_THRU();
4810 case VERR_ACCESS_DENIED:
4811 uErr = X86_TRAP_PF_P;
4812 break;
4813 }
4814
4815 if (IEM_GET_CPL(pVCpu) == 3)
4816 uErr |= X86_TRAP_PF_US;
4817
4818 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4819 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4820 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4821 uErr |= X86_TRAP_PF_ID;
4822
4823#if 0 /* This is so much non-sense, really. Why was it done like that? */
4824 /* Note! RW access callers reporting a WRITE protection fault will clear
4825 the READ flag before calling. So, read-modify-write accesses (RW)
4826 can safely be reported as READ faults. */
4827 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4828 uErr |= X86_TRAP_PF_RW;
4829#else
4830 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4831 {
4832 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4833 /// (regardless of outcome of the comparison in the latter case).
4834 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4835 uErr |= X86_TRAP_PF_RW;
4836 }
4837#endif
4838
4839 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4840 of the memory operand rather than at the start of it. (Not sure what
4841 happens if it crosses a page boundary.) The current heuristic for
4842 this is to report the #PF for the last byte if the access is larger than
4843 64 bytes. This is probably not correct, but we can work that out later;
4844 the main objective now is to get FXSAVE to work like real hardware and
4845 make bs3-cpu-basic2 work. */
4846 if (cbAccess <= 64)
4847 { /* likely */ }
4848 else
4849 GCPtrWhere += cbAccess - 1;
4850
4851 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4852 uErr, GCPtrWhere);
4853}
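/*
 * Illustrative sketch (not part of the build): the #PF error code bits that
 * the function above assembles, decoded here with plain constants instead of
 * the X86_TRAP_PF_XXX defines so the example stays self contained.
 */
#if 0 /* example only */
# include <stdio.h>

static void exampleDumpPageFaultErrorCode(uint16_t uErr)
{
    printf("P=%d RW=%d US=%d RSVD=%d ID=%d\n",
           !!(uErr & 0x01),  /* bit 0: 0=page not present, 1=protection violation */
           !!(uErr & 0x02),  /* bit 1: write access */
           !!(uErr & 0x04),  /* bit 2: user-mode (CPL=3) access */
           !!(uErr & 0x08),  /* bit 3: reserved bit set in a paging structure */
           !!(uErr & 0x10)); /* bit 4: instruction fetch (NX/PAE relevant) */
}
#endif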
4854
4855#ifdef IEM_WITH_SETJMP
4856/** \#PF(n) - 0e, longjmp. */
4857DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4858 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4859{
4860 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4861}
4862#endif
4863
4864
4865/** \#MF(0) - 10. */
4866VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4867{
4868 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4869 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4870
4871 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4872 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4873 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4874}
4875
4876#ifdef IEM_WITH_SETJMP
4877/** \#MF(0) - 10, longjmp. */
4878DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4879{
4880 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4881}
4882#endif
4883
4884
4885/** \#AC(0) - 11. */
4886VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4887{
4888 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4889}
4890
4891#ifdef IEM_WITH_SETJMP
4892/** \#AC(0) - 11, longjmp. */
4893DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4894{
4895 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4896}
4897#endif
4898
4899
4900/** \#XF(0)/\#XM(0) - 19. */
4901VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4902{
4903 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4904}
4905
4906
4907#ifdef IEM_WITH_SETJMP
4908/** \#XF(0)/\#XM(0) - 19, longjmp. */
4909DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4910{
4911 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4912}
4913#endif
4914
4915
4916/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4917IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4918{
4919 NOREF(cbInstr);
4920 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4921}
4922
4923
4924/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4925IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4926{
4927 NOREF(cbInstr);
4928 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4929}
4930
4931
4932/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4933IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4934{
4935 NOREF(cbInstr);
4936 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4937}
4938
4939
4940/** @} */
4941
4942/** @name Common opcode decoders.
4943 * @{
4944 */
4945//#include <iprt/mem.h>
4946
4947/**
4948 * Used to add extra details about a stub case.
4949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4950 */
4951void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4952{
4953#if defined(LOG_ENABLED) && defined(IN_RING3)
4954 PVM pVM = pVCpu->CTX_SUFF(pVM);
4955 char szRegs[4096];
4956 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4957 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4958 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4959 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4960 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4961 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4962 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4963 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4964 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4965 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4966 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4967 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4968 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4969 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4970 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4971 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4972 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4973 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4974 " efer=%016VR{efer}\n"
4975 " pat=%016VR{pat}\n"
4976 " sf_mask=%016VR{sf_mask}\n"
4977 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4978 " lstar=%016VR{lstar}\n"
4979 " star=%016VR{star} cstar=%016VR{cstar}\n"
4980 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4981 );
4982
4983 char szInstr[256];
4984 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4985 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4986 szInstr, sizeof(szInstr), NULL);
4987
4988 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4989#else
4990 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4991#endif
4992}
4993
4994/** @} */
4995
4996
4997
4998/** @name Register Access.
4999 * @{
5000 */
5001
5002/**
5003 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5004 *
5005 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5006 * segment limit.
5007 *
5008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5009 * @param cbInstr Instruction size.
5010 * @param offNextInstr The offset of the next instruction.
5011 * @param enmEffOpSize Effective operand size.
5012 */
5013VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
5014 IEMMODE enmEffOpSize) RT_NOEXCEPT
5015{
5016 switch (enmEffOpSize)
5017 {
5018 case IEMMODE_16BIT:
5019 {
5020 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
5021 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5022 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
5023 pVCpu->cpum.GstCtx.rip = uNewIp;
5024 else
5025 return iemRaiseGeneralProtectionFault0(pVCpu);
5026 break;
5027 }
5028
5029 case IEMMODE_32BIT:
5030 {
5031 Assert(!IEM_IS_64BIT_CODE(pVCpu));
5032 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
5033
5034 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
5035 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5036 pVCpu->cpum.GstCtx.rip = uNewEip;
5037 else
5038 return iemRaiseGeneralProtectionFault0(pVCpu);
5039 break;
5040 }
5041
5042 case IEMMODE_64BIT:
5043 {
5044 Assert(IEM_IS_64BIT_CODE(pVCpu));
5045
5046 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5047 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5048 pVCpu->cpum.GstCtx.rip = uNewRip;
5049 else
5050 return iemRaiseGeneralProtectionFault0(pVCpu);
5051 break;
5052 }
5053
5054 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5055 }
5056
5057#ifndef IEM_WITH_CODE_TLB
5058 /* Flush the prefetch buffer. */
5059 pVCpu->iem.s.cbOpcode = cbInstr;
5060#endif
5061
5062 /*
5063 * Clear RF and finish the instruction (maybe raise #DB).
5064 */
5065 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5066}
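/*
 * Illustrative sketch (not part of the build): the 16-bit case above relies on
 * uint16_t truncation, so a backwards rel8 jump near the start of the segment
 * wraps modulo 64K before the CS limit check.  Stand-alone example, not IEM code:
 */
#if 0 /* example only */
static uint16_t exampleNewIp16(uint16_t uIp, uint8_t cbInstr, int8_t offRel8)
{
    /* E.g. uIp=0x0001, cbInstr=2, offRel8=-16 gives 0xfff3, which the caller
       then compares against the CS limit. */
    return (uint16_t)(uIp + cbInstr + (int16_t)offRel8);
}
#endif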
5067
5068
5069/**
5070 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5071 *
5072 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5073 * segment limit.
5074 *
5075 * @returns Strict VBox status code.
5076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5077 * @param cbInstr Instruction size.
5078 * @param offNextInstr The offset of the next instruction.
5079 */
5080VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
5081{
5082 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5083
5084 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
5085 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5086 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
5087 pVCpu->cpum.GstCtx.rip = uNewIp;
5088 else
5089 return iemRaiseGeneralProtectionFault0(pVCpu);
5090
5091#ifndef IEM_WITH_CODE_TLB
5092 /* Flush the prefetch buffer. */
5093 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5094#endif
5095
5096 /*
5097 * Clear RF and finish the instruction (maybe raise #DB).
5098 */
5099 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5100}
5101
5102
5103/**
5104 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5105 *
5106 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5107 * segment limit.
5108 *
5109 * @returns Strict VBox status code.
5110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5111 * @param cbInstr Instruction size.
5112 * @param offNextInstr The offset of the next instruction.
5113 * @param enmEffOpSize Effective operand size.
5114 */
5115VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5116 IEMMODE enmEffOpSize) RT_NOEXCEPT
5117{
5118 if (enmEffOpSize == IEMMODE_32BIT)
5119 {
5120 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
5121
5122 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
5123 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5124 pVCpu->cpum.GstCtx.rip = uNewEip;
5125 else
5126 return iemRaiseGeneralProtectionFault0(pVCpu);
5127 }
5128 else
5129 {
5130 Assert(enmEffOpSize == IEMMODE_64BIT);
5131
5132 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5133 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5134 pVCpu->cpum.GstCtx.rip = uNewRip;
5135 else
5136 return iemRaiseGeneralProtectionFault0(pVCpu);
5137 }
5138
5139#ifndef IEM_WITH_CODE_TLB
5140 /* Flush the prefetch buffer. */
5141 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5142#endif
5143
5144 /*
5145 * Clear RF and finish the instruction (maybe raise #DB).
5146 */
5147 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5148}
5149
5150/** @} */
5151
5152
5153/** @name FPU access and helpers.
5154 *
5155 * @{
5156 */
5157
5158/**
5159 * Updates the x87.DS and FPUDP registers.
5160 *
5161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5162 * @param pFpuCtx The FPU context.
5163 * @param iEffSeg The effective segment register.
5164 * @param GCPtrEff The effective address relative to @a iEffSeg.
5165 */
5166DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5167{
5168 RTSEL sel;
5169 switch (iEffSeg)
5170 {
5171 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
5172 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
5173 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
5174 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
5175 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
5176 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
5177 default:
5178 AssertMsgFailed(("%d\n", iEffSeg));
5179 sel = pVCpu->cpum.GstCtx.ds.Sel;
5180 }
5181 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5182 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5183 {
5184 pFpuCtx->DS = 0;
5185 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
5186 }
5187 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
5188 {
5189 pFpuCtx->DS = sel;
5190 pFpuCtx->FPUDP = GCPtrEff;
5191 }
5192 else
5193 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
5194}
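/*
 * Illustrative sketch (not part of the build): in the real/V86 branch above
 * FPUDP receives segment*16 + offset while DS is left zero.  Stand-alone
 * version of that calculation, with simplified 16-bit parameters:
 */
#if 0 /* example only */
static uint32_t exampleRealModeFpuDp(uint16_t uSel, uint16_t offEff)
{
    /* E.g. uSel=0x1234, offEff=0x0010 -> 0x12350. */
    return (uint32_t)offEff + ((uint32_t)uSel << 4);
}
#endif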
5195
5196
5197/**
5198 * Rotates the stack registers in the push direction.
5199 *
5200 * @param pFpuCtx The FPU context.
5201 * @remarks This is a complete waste of time, but fxsave stores the registers in
5202 * stack order.
5203 */
5204DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5205{
5206 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5207 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5208 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5209 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5210 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5211 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5212 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5213 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5214 pFpuCtx->aRegs[0].r80 = r80Tmp;
5215}
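/*
 * Illustrative sketch (not part of the build): aRegs[] is kept in ST(i) order,
 * so a push (TOP := TOP - 1 mod 8) is mirrored by rotating the whole array one
 * step: the old ST(0) becomes ST(1) and the slot that will receive the pushed
 * value ends up as the new ST(0).  The unrolled code above is equivalent to:
 */
#if 0 /* example only */
static void exampleRotateStackPush(RTFLOAT80U aRegs[8])
{
    RTFLOAT80U const r80Tmp = aRegs[7];
    for (unsigned i = 7; i > 0; i--)
        aRegs[i] = aRegs[i - 1];
    aRegs[0] = r80Tmp;
}
#endif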
5216
5217
5218/**
5219 * Rotates the stack registers in the pop direction.
5220 *
5221 * @param pFpuCtx The FPU context.
5222 * @remarks This is a complete waste of time, but fxsave stores the registers in
5223 * stack order.
5224 */
5225DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5226{
5227 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5228 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5229 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5230 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5231 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5232 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5233 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5234 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5235 pFpuCtx->aRegs[7].r80 = r80Tmp;
5236}
5237
5238
5239/**
5240 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5241 * exception prevents it.
5242 *
5243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5244 * @param pResult The FPU operation result to push.
5245 * @param pFpuCtx The FPU context.
5246 */
5247static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5248{
5249 /* Update FSW and bail if there are pending exceptions afterwards. */
5250 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5251 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5252 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5253 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5254 {
5255 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5256 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
5257 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5258 pFpuCtx->FSW = fFsw;
5259 return;
5260 }
5261
5262 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5263 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5264 {
5265 /* All is fine, push the actual value. */
5266 pFpuCtx->FTW |= RT_BIT(iNewTop);
5267 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5268 }
5269 else if (pFpuCtx->FCW & X86_FCW_IM)
5270 {
5271 /* Masked stack overflow, push QNaN. */
5272 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5273 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5274 }
5275 else
5276 {
5277 /* Raise stack overflow, don't push anything. */
5278 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5279 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5280 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5281 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5282 return;
5283 }
5284
5285 fFsw &= ~X86_FSW_TOP_MASK;
5286 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5287 pFpuCtx->FSW = fFsw;
5288
5289 iemFpuRotateStackPush(pFpuCtx);
5290 RT_NOREF(pVCpu);
5291}
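/*
 * Illustrative sketch (not part of the build): the "+ 7" above is simply
 * TOP - 1 modulo 8, i.e. the register the push would land in; if that
 * register's FTW bit is already set the push overflows.  Stand-alone check
 * using the same field macros:
 */
#if 0 /* example only */
static bool examplePushWouldOverflow(uint16_t fFsw, uint16_t fFtw)
{
    uint16_t const iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK; /* (TOP - 1) & 7 */
    return RT_BOOL(fFtw & RT_BIT(iNewTop));
}
#endif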
5292
5293
5294/**
5295 * Stores a result in a FPU register and updates the FSW and FTW.
5296 *
5297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5298 * @param pFpuCtx The FPU context.
5299 * @param pResult The result to store.
5300 * @param iStReg Which FPU register to store it in.
5301 */
5302static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5303{
5304 Assert(iStReg < 8);
5305 uint16_t fNewFsw = pFpuCtx->FSW;
5306 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
5307 fNewFsw &= ~X86_FSW_C_MASK;
5308 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5309 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5310 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5311 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5312 pFpuCtx->FSW = fNewFsw;
5313 pFpuCtx->FTW |= RT_BIT(iReg);
5314 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5315 RT_NOREF(pVCpu);
5316}
5317
5318
5319/**
5320 * Only updates the FPU status word (FSW) with the result of the current
5321 * instruction.
5322 *
5323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5324 * @param pFpuCtx The FPU context.
5325 * @param u16FSW The FSW output of the current instruction.
5326 */
5327static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
5328{
5329 uint16_t fNewFsw = pFpuCtx->FSW;
5330 fNewFsw &= ~X86_FSW_C_MASK;
5331 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
5332 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5333 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5334 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5335 pFpuCtx->FSW = fNewFsw;
5336 RT_NOREF(pVCpu);
5337}
5338
5339
5340/**
5341 * Pops one item off the FPU stack if no pending exception prevents it.
5342 *
5343 * @param pFpuCtx The FPU context.
5344 */
5345static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5346{
5347 /* Check pending exceptions. */
5348 uint16_t uFSW = pFpuCtx->FSW;
5349 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5350 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5351 return;
5352
5353 /* TOP++ (a pop increments TOP, modulo 8). */
5354 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5355 uFSW &= ~X86_FSW_TOP_MASK;
5356 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5357 pFpuCtx->FSW = uFSW;
5358
5359 /* Mark the previous ST0 as empty. */
5360 iOldTop >>= X86_FSW_TOP_SHIFT;
5361 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5362
5363 /* Rotate the registers. */
5364 iemFpuRotateStackPop(pFpuCtx);
5365}
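/*
 * Illustrative sketch (not part of the build): adding 9 inside the TOP field
 * and masking is just TOP + 1 modulo 8 without moving the field out of place
 * and back.  Stand-alone equivalent using the field macros:
 */
#if 0 /* example only */
static uint16_t exampleFswWithTopIncremented(uint16_t fFsw)
{
    uint16_t const iNewTop = (X86_FSW_TOP_GET(fFsw) + 1) & X86_FSW_TOP_SMASK;
    return (uint16_t)((fFsw & ~X86_FSW_TOP_MASK) | (iNewTop << X86_FSW_TOP_SHIFT));
}
#endif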
5366
5367
5368/**
5369 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5370 *
5371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5372 * @param pResult The FPU operation result to push.
5373 * @param uFpuOpcode The FPU opcode value.
5374 */
5375void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5376{
5377 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5378 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5379 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5380}
5381
5382
5383/**
5384 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5385 * and sets FPUDP and FPUDS.
5386 *
5387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5388 * @param pResult The FPU operation result to push.
5389 * @param iEffSeg The effective segment register.
5390 * @param GCPtrEff The effective address relative to @a iEffSeg.
5391 * @param uFpuOpcode The FPU opcode value.
5392 */
5393void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5394 uint16_t uFpuOpcode) RT_NOEXCEPT
5395{
5396 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5397 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5398 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5399 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5400}
5401
5402
5403/**
5404 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5405 * unless a pending exception prevents it.
5406 *
5407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5408 * @param pResult The FPU operation result to store and push.
5409 * @param uFpuOpcode The FPU opcode value.
5410 */
5411void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5412{
5413 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5414 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5415
5416 /* Update FSW and bail if there are pending exceptions afterwards. */
5417 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5418 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5419 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5420 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5421 {
5422 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5423 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5424 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5425 pFpuCtx->FSW = fFsw;
5426 return;
5427 }
5428
5429 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5430 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5431 {
5432 /* All is fine, push the actual value. */
5433 pFpuCtx->FTW |= RT_BIT(iNewTop);
5434 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5435 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5436 }
5437 else if (pFpuCtx->FCW & X86_FCW_IM)
5438 {
5439 /* Masked stack overflow, push QNaN. */
5440 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5441 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5442 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5443 }
5444 else
5445 {
5446 /* Raise stack overflow, don't push anything. */
5447 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5448 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5449 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5450 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5451 return;
5452 }
5453
5454 fFsw &= ~X86_FSW_TOP_MASK;
5455 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5456 pFpuCtx->FSW = fFsw;
5457
5458 iemFpuRotateStackPush(pFpuCtx);
5459}
5460
5461
5462/**
5463 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5464 * FOP.
5465 *
5466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5467 * @param pResult The result to store.
5468 * @param iStReg Which FPU register to store it in.
5469 * @param uFpuOpcode The FPU opcode value.
5470 */
5471void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5472{
5473 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5474 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5475 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5476}
5477
5478
5479/**
5480 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5481 * FOP, and then pops the stack.
5482 *
5483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5484 * @param pResult The result to store.
5485 * @param iStReg Which FPU register to store it in.
5486 * @param uFpuOpcode The FPU opcode value.
5487 */
5488void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5489{
5490 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5491 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5492 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5493 iemFpuMaybePopOne(pFpuCtx);
5494}
5495
5496
5497/**
5498 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5499 * FPUDP, and FPUDS.
5500 *
5501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5502 * @param pResult The result to store.
5503 * @param iStReg Which FPU register to store it in.
5504 * @param iEffSeg The effective memory operand selector register.
5505 * @param GCPtrEff The effective memory operand offset.
5506 * @param uFpuOpcode The FPU opcode value.
5507 */
5508void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5509 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5510{
5511 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5512 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5513 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5514 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5515}
5516
5517
5518/**
5519 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5520 * FPUDP, and FPUDS, and then pops the stack.
5521 *
5522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5523 * @param pResult The result to store.
5524 * @param iStReg Which FPU register to store it in.
5525 * @param iEffSeg The effective memory operand selector register.
5526 * @param GCPtrEff The effective memory operand offset.
5527 * @param uFpuOpcode The FPU opcode value.
5528 */
5529void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5530 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5531{
5532 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5533 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5534 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5535 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5536 iemFpuMaybePopOne(pFpuCtx);
5537}
5538
5539
5540/**
5541 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5542 *
5543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5544 * @param uFpuOpcode The FPU opcode value.
5545 */
5546void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5547{
5548 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5549 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5550}
5551
5552
5553/**
5554 * Updates the FSW, FOP, FPUIP, and FPUCS.
5555 *
5556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5557 * @param u16FSW The FSW from the current instruction.
5558 * @param uFpuOpcode The FPU opcode value.
5559 */
5560void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5561{
5562 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5563 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5564 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5565}
5566
5567
5568/**
5569 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5570 *
5571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5572 * @param u16FSW The FSW from the current instruction.
5573 * @param uFpuOpcode The FPU opcode value.
5574 */
5575void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5576{
5577 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5578 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5579 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5580 iemFpuMaybePopOne(pFpuCtx);
5581}
5582
5583
5584/**
5585 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5586 *
5587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5588 * @param u16FSW The FSW from the current instruction.
5589 * @param iEffSeg The effective memory operand selector register.
5590 * @param GCPtrEff The effective memory operand offset.
5591 * @param uFpuOpcode The FPU opcode value.
5592 */
5593void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5594{
5595 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5596 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5597 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5598 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5599}
5600
5601
5602/**
5603 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5604 *
5605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5606 * @param u16FSW The FSW from the current instruction.
5607 * @param uFpuOpcode The FPU opcode value.
5608 */
5609void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5610{
5611 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5612 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5613 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5614 iemFpuMaybePopOne(pFpuCtx);
5615 iemFpuMaybePopOne(pFpuCtx);
5616}
5617
5618
5619/**
5620 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5621 *
5622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5623 * @param u16FSW The FSW from the current instruction.
5624 * @param iEffSeg The effective memory operand selector register.
5625 * @param GCPtrEff The effective memory operand offset.
5626 * @param uFpuOpcode The FPU opcode value.
5627 */
5628void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5629 uint16_t uFpuOpcode) RT_NOEXCEPT
5630{
5631 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5632 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5633 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5634 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5635 iemFpuMaybePopOne(pFpuCtx);
5636}
5637
5638
5639/**
5640 * Worker routine for raising an FPU stack underflow exception.
5641 *
5642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5643 * @param pFpuCtx The FPU context.
5644 * @param iStReg The stack register being accessed.
5645 */
5646static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5647{
5648 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5649 if (pFpuCtx->FCW & X86_FCW_IM)
5650 {
5651 /* Masked underflow. */
5652 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5653 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5654 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5655 if (iStReg != UINT8_MAX)
5656 {
5657 pFpuCtx->FTW |= RT_BIT(iReg);
5658 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5659 }
5660 }
5661 else
5662 {
5663 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5664 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5665 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5666 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5667 }
5668 RT_NOREF(pVCpu);
5669}
5670
5671
5672/**
5673 * Raises a FPU stack underflow exception.
5674 *
5675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5676 * @param iStReg The destination register that should be loaded
5677 * with QNaN if \#IS is not masked. Specify
5678 * UINT8_MAX if none (like for fcom).
5679 * @param uFpuOpcode The FPU opcode value.
5680 */
5681void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5682{
5683 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5684 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5685 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5686}
5687
5688
5689void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5690{
5691 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5692 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5693 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5694 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5695}
5696
5697
5698void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5699{
5700 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5701 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5702 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5703 iemFpuMaybePopOne(pFpuCtx);
5704}
5705
5706
5707void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5708 uint16_t uFpuOpcode) RT_NOEXCEPT
5709{
5710 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5711 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5712 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5713 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5714 iemFpuMaybePopOne(pFpuCtx);
5715}
5716
5717
5718void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5719{
5720 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5721 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5722 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5723 iemFpuMaybePopOne(pFpuCtx);
5724 iemFpuMaybePopOne(pFpuCtx);
5725}
5726
5727
5728void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5729{
5730 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5731 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5732
5733 if (pFpuCtx->FCW & X86_FCW_IM)
5734 {
5735 /* Masked underflow - Push QNaN. */
5736 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5737 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5738 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5739 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5740 pFpuCtx->FTW |= RT_BIT(iNewTop);
5741 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5742 iemFpuRotateStackPush(pFpuCtx);
5743 }
5744 else
5745 {
5746 /* Exception pending - don't change TOP or the register stack. */
5747 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5748 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5749 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5750 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5751 }
5752}
5753
5754
5755void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5756{
5757 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5758 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5759
5760 if (pFpuCtx->FCW & X86_FCW_IM)
5761 {
5762 /* Masked underflow - Push QNaN. */
5763 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5764 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5765 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5766 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5767 pFpuCtx->FTW |= RT_BIT(iNewTop);
5768 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5769 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5770 iemFpuRotateStackPush(pFpuCtx);
5771 }
5772 else
5773 {
5774 /* Exception pending - don't change TOP or the register stack. */
5775 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5776 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5777 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5778 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5779 }
5780}
5781
5782
5783/**
5784 * Worker routine for raising an FPU stack overflow exception on a push.
5785 *
5786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5787 * @param pFpuCtx The FPU context.
5788 */
5789static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5790{
5791 if (pFpuCtx->FCW & X86_FCW_IM)
5792 {
5793 /* Masked overflow. */
5794 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5795 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5796 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5797 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5798 pFpuCtx->FTW |= RT_BIT(iNewTop);
5799 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5800 iemFpuRotateStackPush(pFpuCtx);
5801 }
5802 else
5803 {
5804 /* Exception pending - don't change TOP or the register stack. */
5805 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5806 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5807 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5808 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5809 }
5810 RT_NOREF(pVCpu);
5811}
5812
5813
5814/**
5815 * Raises a FPU stack overflow exception on a push.
5816 *
5817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5818 * @param uFpuOpcode The FPU opcode value.
5819 */
5820void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5821{
5822 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5823 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5824 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5825}
5826
5827
5828/**
5829 * Raises a FPU stack overflow exception on a push with a memory operand.
5830 *
5831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5832 * @param iEffSeg The effective memory operand selector register.
5833 * @param GCPtrEff The effective memory operand offset.
5834 * @param uFpuOpcode The FPU opcode value.
5835 */
5836void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5837{
5838 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5839 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5840 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5841 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5842}
5843
5844/** @} */
5845
5846
5847/** @name Memory access.
5848 *
5849 * @{
5850 */
5851
5852#undef LOG_GROUP
5853#define LOG_GROUP LOG_GROUP_IEM_MEM
5854
5855/**
5856 * Updates the IEMCPU::cbWritten counter if applicable.
5857 *
5858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5859 * @param fAccess The access being accounted for.
5860 * @param cbMem The access size.
5861 */
5862DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5863{
5864 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5865 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5866 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5867}
5868
5869
5870/**
5871 * Applies the segment limit, base and attributes.
5872 *
5873 * This may raise a \#GP or \#SS.
5874 *
5875 * @returns VBox strict status code.
5876 *
5877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5878 * @param fAccess The kind of access which is being performed.
5879 * @param iSegReg The index of the segment register to apply.
5880 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5881 * TSS, ++).
5882 * @param cbMem The access size.
5883 * @param pGCPtrMem Pointer to the guest memory address to apply
5884 * segmentation to. Input and output parameter.
5885 */
5886VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5887{
5888 if (iSegReg == UINT8_MAX)
5889 return VINF_SUCCESS;
5890
5891 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5892 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5893 switch (IEM_GET_CPU_MODE(pVCpu))
5894 {
5895 case IEMMODE_16BIT:
5896 case IEMMODE_32BIT:
5897 {
5898 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5899 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5900
5901 if ( pSel->Attr.n.u1Present
5902 && !pSel->Attr.n.u1Unusable)
5903 {
5904 Assert(pSel->Attr.n.u1DescType);
5905 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5906 {
5907 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5908 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5909 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5910
5911 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5912 {
5913 /** @todo CPL check. */
5914 }
5915
5916 /*
5917 * There are two kinds of data selectors, normal and expand down.
5918 */
5919 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5920 {
5921 if ( GCPtrFirst32 > pSel->u32Limit
5922 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5923 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5924 }
5925 else
5926 {
5927 /*
5928 * The upper boundary is defined by the B bit, not the G bit!
5929 */
5930 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5931 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5932 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5933 }
5934 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5935 }
5936 else
5937 {
5938 /*
5939 * Code selectors can usually be used to read thru; writing is
5940 * only permitted in real and V8086 mode.
5941 */
5942 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5943 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5944 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5945 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5946 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5947
5948 if ( GCPtrFirst32 > pSel->u32Limit
5949 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5950 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5951
5952 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5953 {
5954 /** @todo CPL check. */
5955 }
5956
5957 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5958 }
5959 }
5960 else
5961 return iemRaiseGeneralProtectionFault0(pVCpu);
5962 return VINF_SUCCESS;
5963 }
5964
5965 case IEMMODE_64BIT:
5966 {
5967 RTGCPTR GCPtrMem = *pGCPtrMem;
5968 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5969 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5970
5971 Assert(cbMem >= 1);
5972 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5973 return VINF_SUCCESS;
5974 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5975 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5976 return iemRaiseGeneralProtectionFault0(pVCpu);
5977 }
5978
5979 default:
5980 AssertFailedReturn(VERR_IEM_IPE_7);
5981 }
5982}
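
/*
 * Worked example for the expand-down data selector branch above (illustrative
 * only): with u32Limit = 0x0fff the valid offsets start at 0x1000 and the upper
 * bound is selected by the B/D bit:
 *
 *     B=1: valid range 0x00001000..0xffffffff
 *          GCPtrFirst32 = 0x0fff, cbMem = 1  ->  0x0fff < 0x1000                -> #GP/#SS
 *          GCPtrFirst32 = 0x1000, cbMem = 4  ->  both bounds okay               -> base added, success
 *     B=0: valid range 0x00001000..0x0000ffff
 *          GCPtrFirst32 = 0xfffe, cbMem = 4  ->  GCPtrLast32 = 0x10001 > 0xffff -> #GP/#SS
 */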
5983
5984
5985/**
5986 * Translates a virtual address to a physical address and checks whether we
5987 * can access the page as specified.
5988 *
5989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5990 * @param GCPtrMem The virtual address.
5991 * @param cbAccess The access size, for raising \#PF correctly for
5992 * FXSAVE and such.
5993 * @param fAccess The intended access.
5994 * @param pGCPhysMem Where to return the physical address.
5995 */
5996VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5997 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5998{
5999 /** @todo Need a different PGM interface here. We're currently using
6000 * generic / REM interfaces. This won't cut it for R0. */
6001 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
6002 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
6003 * here. */
6004 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6005 PGMPTWALKFAST WalkFast;
6006 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6007 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6008 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6009 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
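    /* Note: thanks to the AssertCompile just above, the XOR below yields
       PGMQPAGE_F_CR0_WP0 exactly when CR0.WP is clear, i.e. when the relaxed
       supervisor-write rules for read-only pages apply. */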
6010 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6011 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6012 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6013 fQPage |= PGMQPAGE_F_USER_MODE;
6014 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6015 if (RT_SUCCESS(rc))
6016 {
6017 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6018
6019 /* If the page is writable and does not have the no-exec bit set, all
6020 access is allowed. Otherwise we'll have to check more carefully... */
6021 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
6022 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
6023 || (WalkFast.fEffective & X86_PTE_RW)
6024 || ( ( IEM_GET_CPL(pVCpu) != 3
6025 || (fAccess & IEM_ACCESS_WHAT_SYS))
6026 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
6027 && ( (WalkFast.fEffective & X86_PTE_US)
6028 || IEM_GET_CPL(pVCpu) != 3
6029 || (fAccess & IEM_ACCESS_WHAT_SYS) )
6030 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
6031 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
6032 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
6033 )
6034 );
6035
6036 /* PGMGstQueryPageFast sets the A & D bits. */
6037 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6038 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
6039
6040 *pGCPhysMem = WalkFast.GCPhys;
6041 return VINF_SUCCESS;
6042 }
6043
6044 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6045 /** @todo Check unassigned memory in unpaged mode. */
6046#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6047 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6048 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6049#endif
6050 *pGCPhysMem = NIL_RTGCPHYS;
6051 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
6052}
6053
6054#if 0 /*unused*/
6055/**
6056 * Looks up a memory mapping entry.
6057 *
6058 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6060 * @param pvMem The memory address.
6061 * @param fAccess The access type and origin to match.
6062 */
6063DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
6064{
6065 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6066 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6067 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
6068 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6069 return 0;
6070 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
6071 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6072 return 1;
6073 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
6074 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6075 return 2;
6076 return VERR_NOT_FOUND;
6077}
6078#endif
6079
6080/**
6081 * Finds a free memmap entry when iNextMapping doesn't point at a free one.
6082 *
6083 * @returns Memory mapping index, 1024 on failure.
6084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6085 */
6086static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
6087{
6088 /*
6089 * The easy case.
6090 */
6091 if (pVCpu->iem.s.cActiveMappings == 0)
6092 {
6093 pVCpu->iem.s.iNextMapping = 1;
6094 return 0;
6095 }
6096
6097 /* There should be enough mappings for all instructions. */
6098 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
6099
6100 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
6101 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6102 return i;
6103
6104 AssertFailedReturn(1024);
6105}
6106
6107
6108/**
6109 * Commits a bounce buffer that needs writing back and unmaps it.
6110 *
6111 * @returns Strict VBox status code.
6112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6113 * @param iMemMap The index of the buffer to commit.
6114 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
6115 * Always false in ring-3, obviously.
6116 */
6117static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
6118{
6119 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6120 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6121#ifdef IN_RING3
6122 Assert(!fPostponeFail);
6123 RT_NOREF_PV(fPostponeFail);
6124#endif
6125
6126 /*
6127 * Do the writing.
6128 */
6129 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6130 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
6131 {
6132 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
6133 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6134 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6135 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6136 {
6137 /*
6138 * Carefully and efficiently dealing with access handler return
6139 * codes makes this a little bloated.
6140 */
6141 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6142 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6143 pbBuf,
6144 cbFirst,
6145 PGMACCESSORIGIN_IEM);
6146 if (rcStrict == VINF_SUCCESS)
6147 {
6148 if (cbSecond)
6149 {
6150 rcStrict = PGMPhysWrite(pVM,
6151 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6152 pbBuf + cbFirst,
6153 cbSecond,
6154 PGMACCESSORIGIN_IEM);
6155 if (rcStrict == VINF_SUCCESS)
6156 { /* nothing */ }
6157 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6158 {
6159 LogEx(LOG_GROUP_IEM,
6160 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6161 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6162 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6163 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6164 }
6165#ifndef IN_RING3
6166 else if (fPostponeFail)
6167 {
6168 LogEx(LOG_GROUP_IEM,
6169 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6170 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6171 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6172 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6173 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6174 return iemSetPassUpStatus(pVCpu, rcStrict);
6175 }
6176#endif
6177 else
6178 {
6179 LogEx(LOG_GROUP_IEM,
6180 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6181 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6182 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6183 return rcStrict;
6184 }
6185 }
6186 }
6187 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6188 {
6189 if (!cbSecond)
6190 {
6191 LogEx(LOG_GROUP_IEM,
6192 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6193 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6194 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6195 }
6196 else
6197 {
6198 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6199 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6200 pbBuf + cbFirst,
6201 cbSecond,
6202 PGMACCESSORIGIN_IEM);
6203 if (rcStrict2 == VINF_SUCCESS)
6204 {
6205 LogEx(LOG_GROUP_IEM,
6206 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6207 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6208 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6209 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6210 }
6211 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6212 {
6213 LogEx(LOG_GROUP_IEM,
6214 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6215 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6216 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6217 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6218 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6219 }
6220#ifndef IN_RING3
6221 else if (fPostponeFail)
6222 {
6223 LogEx(LOG_GROUP_IEM,
6224 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6225 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6226 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6227 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6228 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6229 return iemSetPassUpStatus(pVCpu, rcStrict);
6230 }
6231#endif
6232 else
6233 {
6234 LogEx(LOG_GROUP_IEM,
6235 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6236 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6237 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6238 return rcStrict2;
6239 }
6240 }
6241 }
6242#ifndef IN_RING3
6243 else if (fPostponeFail)
6244 {
6245 LogEx(LOG_GROUP_IEM,
6246 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6247 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6248 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6249 if (!cbSecond)
6250 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6251 else
6252 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6253 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6254 return iemSetPassUpStatus(pVCpu, rcStrict);
6255 }
6256#endif
6257 else
6258 {
6259 LogEx(LOG_GROUP_IEM,
6260 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6261 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6262 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6263 return rcStrict;
6264 }
6265 }
6266 else
6267 {
6268 /*
6269 * No access handlers, much simpler.
6270 */
6271 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6272 if (RT_SUCCESS(rc))
6273 {
6274 if (cbSecond)
6275 {
6276 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6277 if (RT_SUCCESS(rc))
6278 { /* likely */ }
6279 else
6280 {
6281 LogEx(LOG_GROUP_IEM,
6282 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6283 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6284 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6285 return rc;
6286 }
6287 }
6288 }
6289 else
6290 {
6291 LogEx(LOG_GROUP_IEM,
6292 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6293 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6294 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6295 return rc;
6296 }
6297 }
6298 }
6299
6300#if defined(IEM_LOG_MEMORY_WRITES)
6301 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6302 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6303 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6304 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6305 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6306 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6307
6308 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6309 g_cbIemWrote = cbWrote;
6310 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6311#endif
6312
6313 /*
6314 * Free the mapping entry.
6315 */
6316 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6317 Assert(pVCpu->iem.s.cActiveMappings != 0);
6318 pVCpu->iem.s.cActiveMappings--;
6319 return VINF_SUCCESS;
6320}
6321
6322
6323/**
6324 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
6325 */
6326DECL_FORCE_INLINE(uint32_t)
6327iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
6328{
6329 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
6330 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6331 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6332 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6333}
6334
6335
6336/**
6337 * iemMemMap worker that deals with a request crossing pages.
6338 */
6339static VBOXSTRICTRC
6340iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6341 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6342{
6343 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
6344 Assert(cbMem <= GUEST_PAGE_SIZE);
6345
6346 /*
6347 * Do the address translations.
6348 */
6349 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6350 RTGCPHYS GCPhysFirst;
6351 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6352 if (rcStrict != VINF_SUCCESS)
6353 return rcStrict;
6354 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6355
6356 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6357 RTGCPHYS GCPhysSecond;
6358 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6359 cbSecondPage, fAccess, &GCPhysSecond);
6360 if (rcStrict != VINF_SUCCESS)
6361 return rcStrict;
6362 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6363 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6364
6365 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6366
6367 /*
6368 * Check for data breakpoints.
6369 */
6370 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
6371 { /* likely */ }
6372 else
6373 {
6374 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
6375 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6376 cbSecondPage, fAccess);
6377 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6378 if (fDataBps > 1)
6379 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6380 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6381 }
6382
6383 /*
6384 * Read in the current memory content if it's a read, execute or partial
6385 * write access.
6386 */
6387 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6388
6389 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6390 {
6391 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6392 {
6393 /*
6394 * Must carefully deal with access handler status codes here,
6395 * which makes the code a bit bloated.
6396 */
6397 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6398 if (rcStrict == VINF_SUCCESS)
6399 {
6400 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6401 if (rcStrict == VINF_SUCCESS)
6402 { /*likely */ }
6403 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6404 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6405 else
6406 {
6407 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6408 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6409 return rcStrict;
6410 }
6411 }
6412 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6413 {
6414 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6415 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6416 {
6417 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6418 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6419 }
6420 else
6421 {
6422 LogEx(LOG_GROUP_IEM,
6423 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6424 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
6425 return rcStrict2;
6426 }
6427 }
6428 else
6429 {
6430 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6431 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6432 return rcStrict;
6433 }
6434 }
6435 else
6436 {
6437 /*
6438 * No informational status codes here, much more straightforward.
6439 */
6440 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6441 if (RT_SUCCESS(rc))
6442 {
6443 Assert(rc == VINF_SUCCESS);
6444 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6445 if (RT_SUCCESS(rc))
6446 Assert(rc == VINF_SUCCESS);
6447 else
6448 {
6449 LogEx(LOG_GROUP_IEM,
6450 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6451 return rc;
6452 }
6453 }
6454 else
6455 {
6456 LogEx(LOG_GROUP_IEM,
6457 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6458 return rc;
6459 }
6460 }
6461 }
6462#ifdef VBOX_STRICT
6463 else
6464 memset(pbBuf, 0xcc, cbMem);
6465 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6466 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6467#endif
6468 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6469
6470 /*
6471 * Commit the bounce buffer entry.
6472 */
6473 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6474 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6475 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6476 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6477 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6478 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6479 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6480 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6481 pVCpu->iem.s.cActiveMappings++;
6482
6483 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6484 *ppvMem = pbBuf;
6485 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6486 return VINF_SUCCESS;
6487}
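
/*
 * Worked example for the page split above (illustrative only, assuming 4 KiB
 * guest pages): an 8 byte access at GCPtrFirst = 0x...0ffc gives
 *
 *     cbFirstPage  = 0x1000 - 0xffc = 4    (tail of the first page)
 *     cbSecondPage = 8 - 4          = 4    (head of the second page)
 *
 * and the second translation is done for (GCPtrFirst + 7) & ~0xfff, i.e. the
 * first byte of the following page.
 */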
6488
6489
6490/**
6491 * iemMemMap worker that deals with iemMemPageMap failures.
6492 */
6493static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6494 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6495{
6496 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
6497
6498 /*
6499 * Filter out the conditions we can't handle here and the ones which shouldn't happen.
6500 */
6501 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6502 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6503 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6504 {
6505 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6506 return rcMap;
6507 }
6508 pVCpu->iem.s.cPotentialExits++;
6509
6510 /*
6511 * Read in the current memory content if it's a read, execute or partial
6512 * write access.
6513 */
6514 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6515 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6516 {
6517 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6518 memset(pbBuf, 0xff, cbMem);
6519 else
6520 {
6521 int rc;
6522 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6523 {
6524 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6525 if (rcStrict == VINF_SUCCESS)
6526 { /* nothing */ }
6527 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6528 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6529 else
6530 {
6531 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6532 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6533 return rcStrict;
6534 }
6535 }
6536 else
6537 {
6538 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6539 if (RT_SUCCESS(rc))
6540 { /* likely */ }
6541 else
6542 {
6543 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6544 GCPhysFirst, rc));
6545 return rc;
6546 }
6547 }
6548 }
6549 }
6550#ifdef VBOX_STRICT
6551 else
6552 memset(pbBuf, 0xcc, cbMem);
6553#endif
6554#ifdef VBOX_STRICT
6555 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6556 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6557#endif
6558
6559 /*
6560 * Commit the bounce buffer entry.
6561 */
6562 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6563 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6564 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6565 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6566 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6567 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6568 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6569 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6570 pVCpu->iem.s.cActiveMappings++;
6571
6572 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6573 *ppvMem = pbBuf;
6574 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6575 return VINF_SUCCESS;
6576}
6577
6578
6579
6580/**
6581 * Maps the specified guest memory for the given kind of access.
6582 *
6583 * This may be using bounce buffering of the memory if it's crossing a page
6584 * boundary or if there is an access handler installed for any of it. Because
6585 * of lock prefix guarantees, we're in for some extra clutter when this
6586 * happens.
6587 *
6588 * This may raise a \#GP, \#SS, \#PF or \#AC.
6589 *
6590 * @returns VBox strict status code.
6591 *
6592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6593 * @param ppvMem Where to return the pointer to the mapped memory.
6594 * @param pbUnmapInfo Where to return unmap info to be passed to
6595 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6596 * done.
6597 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6598 * 8, 12, 16, 32 or 512. When used by string operations
6599 * it can be up to a page.
6600 * @param iSegReg The index of the segment register to use for this
6601 * access. The base and limits are checked. Use UINT8_MAX
6602 * to indicate that no segmentation is required (for IDT,
6603 * GDT and LDT accesses).
6604 * @param GCPtrMem The address of the guest memory.
6605 * @param fAccess How the memory is being accessed. The
6606 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6607 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6608 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6609 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6610 * set.
6611 * @param uAlignCtl Alignment control:
6612 * - Bits 15:0 is the alignment mask.
6613 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6614 * IEM_MEMMAP_F_ALIGN_SSE, and
6615 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6616 * Pass zero to skip alignment.
6617 */
6618VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6619 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6620{
6621 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);
6622
6623 /*
6624 * Check the input and figure out which mapping entry to use.
6625 */
6626 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6627 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6628 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6629 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6630 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6631
6632 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6633 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6634 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6635 {
6636 iMemMap = iemMemMapFindFree(pVCpu);
6637 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6638 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6639 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6640 pVCpu->iem.s.aMemMappings[2].fAccess),
6641 VERR_IEM_IPE_9);
6642 }
6643
6644 /*
6645 * Map the memory, checking that we can actually access it. If something
6646 * slightly complicated happens, fall back on bounce buffering.
6647 */
6648 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6649 if (rcStrict == VINF_SUCCESS)
6650 { /* likely */ }
6651 else
6652 return rcStrict;
6653
6654 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6655 { /* likely */ }
6656 else
6657 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6658
6659 /*
6660 * Alignment check.
6661 */
6662 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6663 { /* likelyish */ }
6664 else
6665 {
6666 /* Misaligned access. */
6667 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6668 {
6669 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6670 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6671 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6672 {
6673 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6674
6675 if (!iemMemAreAlignmentChecksEnabled(pVCpu))
6676 { /* likely */ }
6677 else
6678 return iemRaiseAlignmentCheckException(pVCpu);
6679 }
6680 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6681 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6682 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6683 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6684 * that's what FXSAVE does on a 10980xe. */
6685 && iemMemAreAlignmentChecksEnabled(pVCpu))
6686 return iemRaiseAlignmentCheckException(pVCpu);
6687 else
6688 return iemRaiseGeneralProtectionFault0(pVCpu);
6689 }
6690
6691#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6692 /* If the access is atomic there are host platform alignment restrictions
6693 we need to conform with. */
6694 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6695# if defined(RT_ARCH_AMD64)
6696 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6697# elif defined(RT_ARCH_ARM64)
6698 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6699# else
6700# error port me
6701# endif
6702 )
6703 { /* okay */ }
6704 else
6705 {
6706 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6707 pVCpu->iem.s.cMisalignedAtomics += 1;
6708 return VINF_EM_EMULATE_SPLIT_LOCK;
6709 }
6710#endif
6711 }
6712
6713#ifdef IEM_WITH_DATA_TLB
6714 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6715
6716 /*
6717 * Get the TLB entry for this page and check PT flags.
6718 *
6719 * We reload the TLB entry if we need to set the dirty bit (accessed
6720 * should in theory always be set).
6721 */
6722 uint8_t *pbMem = NULL;
6723 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
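    /* Each tag maps onto a pair of TLB entries: the even one is tagged with
       uTlbRevision (non-global pages), the odd one with uTlbRevisionGlobal
       (global pages, only loaded when executing at CPL 0, see the miss handling
       below); both halves of the pair are probed here. */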
6724 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
6725 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
6726 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
6727 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
6728 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
6729 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
6730 {
6731# ifdef IEM_WITH_TLB_STATISTICS
6732 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
6733# endif
6734
6735 /* If the page is either supervisor only or non-writable, we need to do
6736 more careful access checks. */
6737 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6738 {
6739 /* Write to read only memory? */
6740 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6741 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6742 && ( ( IEM_GET_CPL(pVCpu) == 3
6743 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6744 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6745 {
6746 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6747 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6748 }
6749
6750 /* Kernel memory accessed by userland? */
6751 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6752 && IEM_GET_CPL(pVCpu) == 3
6753 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6754 {
6755 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6756 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6757 }
6758 }
6759
6760 /* Look up the physical page info if necessary. */
6761 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6762# ifdef IN_RING3
6763 pbMem = pTlbe->pbMappingR3;
6764# else
6765 pbMem = NULL;
6766# endif
6767 else
6768 {
6769 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6770 { /* likely */ }
6771 else
6772 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6773 pTlbe->pbMappingR3 = NULL;
6774 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6775 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6776 &pbMem, &pTlbe->fFlagsAndPhysRev);
6777 AssertRCReturn(rc, rc);
6778# ifdef IN_RING3
6779 pTlbe->pbMappingR3 = pbMem;
6780# endif
6781 }
6782 }
6783 else
6784 {
6785 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
6786
6787 /* This page table walking will set A and D bits as required by the access while performing the walk.
6788 ASSUMES these are set when the address is translated rather than on commit... */
6789 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6790 PGMPTWALKFAST WalkFast;
6791 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6792 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6793 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6794 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6795 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6796 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6797 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6798 fQPage |= PGMQPAGE_F_USER_MODE;
6799 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6800 if (RT_SUCCESS(rc))
6801 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6802 else
6803 {
6804 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6805# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6806 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6807 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6808# endif
6809 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6810 }
6811
6812 uint32_t fDataBps;
6813 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
6814 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
6815 {
6816 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
6817 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
6818 {
6819 pTlbe--;
6820 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
6821 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
6822 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
6823 }
6824 else
6825 {
6826 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
6827 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
6828 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
6829 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
6830 }
6831 }
6832 else
6833 {
6834 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
6835 to the page with the data access breakpoint armed on it to pass thru here. */
6836 if (fDataBps > 1)
6837 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6838 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6839 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6840 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
6841 pTlbe->uTag = uTagNoRev;
6842 }
6843 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
6844 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
6845 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6846 pTlbe->GCPhys = GCPhysPg;
6847 pTlbe->pbMappingR3 = NULL;
6848 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
6849 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6850 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6851 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6852 || IEM_GET_CPL(pVCpu) != 3
6853 || (fAccess & IEM_ACCESS_WHAT_SYS));
6854
6855 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
6856 {
6857 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
6858 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
6859 else
6860 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
6861 }
6862
6863 /* Resolve the physical address. */
6864 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6865 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6866 &pbMem, &pTlbe->fFlagsAndPhysRev);
6867 AssertRCReturn(rc, rc);
6868# ifdef IN_RING3
6869 pTlbe->pbMappingR3 = pbMem;
6870# endif
6871 }
6872
6873 /*
6874 * Check the physical page level access and mapping.
6875 */
6876 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6877 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6878 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6879 { /* probably likely */ }
6880 else
6881 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6882 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6883 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6884 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6885 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6886 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6887
6888 if (pbMem)
6889 {
6890 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6891 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6892 fAccess |= IEM_ACCESS_NOT_LOCKED;
6893 }
6894 else
6895 {
6896 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6897 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6898 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6899 if (rcStrict != VINF_SUCCESS)
6900 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6901 }
6902
6903 void * const pvMem = pbMem;
6904
6905 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6906 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6907 if (fAccess & IEM_ACCESS_TYPE_READ)
6908 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6909
6910#else /* !IEM_WITH_DATA_TLB */
6911
6912 RTGCPHYS GCPhysFirst;
6913 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6914 if (rcStrict != VINF_SUCCESS)
6915 return rcStrict;
6916
6917 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6918 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6919 if (fAccess & IEM_ACCESS_TYPE_READ)
6920 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6921
6922 void *pvMem;
6923 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6924 if (rcStrict != VINF_SUCCESS)
6925 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6926
6927#endif /* !IEM_WITH_DATA_TLB */
6928
6929 /*
6930 * Fill in the mapping table entry.
6931 */
6932 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6933 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6934 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6935 pVCpu->iem.s.cActiveMappings += 1;
6936
6937 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6938 *ppvMem = pvMem;
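    /* Unmap info layout: bits 2:0 = mapping table index, bit 3 = validity marker
       (always set), bits 7:4 = the IEM_ACCESS_TYPE_MASK bits of fAccess; this is
       what iemMemCommitAndUnmap / iemMemRollbackAndUnmap verify before freeing
       the entry. */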
6939 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6940 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6941 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6942
6943 return VINF_SUCCESS;
6944}
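
/*
 * Minimal usage sketch for the mapping API above (illustrative only, not an
 * actual caller): a read-modify-write of a dword on the stack. GCPtrStackSlot
 * stands in for an already calculated effective address; the access flags are
 * assembled from the IEM_ACCESS_TYPE/WHAT bits used throughout this file and
 * the alignment control is simply the natural alignment mask.
 *
 *     uint32_t    *pu32       = NULL;
 *     uint8_t      bUnmapInfo = 0;
 *     VBOXSTRICTRC rcStrict   = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, sizeof(*pu32),
 *                                         X86_SREG_SS, GCPtrStackSlot,
 *                                         IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK,
 *                                         sizeof(*pu32) - 1);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32 += 1;                                          // operate on the mapped (or bounce buffered) bytes
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);  // writes back bounce buffers, releases locks otherwise
 *     }
 *     return rcStrict;
 */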
6945
6946
6947/**
6948 * Commits the guest memory if bounce buffered and unmaps it.
6949 *
6950 * @returns Strict VBox status code.
6951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6952 * @param bUnmapInfo Unmap info set by iemMemMap.
6953 */
6954VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6955{
6956 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6957 AssertMsgReturn( (bUnmapInfo & 0x08)
6958 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6959 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6960 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6961 VERR_NOT_FOUND);
6962
6963 /* If it's bounce buffered, we may need to write back the buffer. */
6964 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6965 {
6966 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6967 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6968 }
6969 /* Otherwise unlock it. */
6970 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6971 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6972
6973 /* Free the entry. */
6974 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6975 Assert(pVCpu->iem.s.cActiveMappings != 0);
6976 pVCpu->iem.s.cActiveMappings--;
6977 return VINF_SUCCESS;
6978}
6979
6980
6981/**
6982 * Rolls back the guest memory (conceptually only) and unmaps it.
6983 *
6984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6985 * @param bUnmapInfo Unmap info set by iemMemMap.
6986 */
6987void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6988{
6989 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6990 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6991 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6992 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6993 == ((unsigned)bUnmapInfo >> 4),
6994 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6995
6996 /* Unlock it if necessary. */
6997 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6998 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6999
7000 /* Free the entry. */
7001 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7002 Assert(pVCpu->iem.s.cActiveMappings != 0);
7003 pVCpu->iem.s.cActiveMappings--;
7004}
7005
7006#ifdef IEM_WITH_SETJMP
7007
7008/**
7009 * Maps the specified guest memory for the given kind of access, longjmp on
7010 * error.
7011 *
7012 * This may be using bounce buffering of the memory if it's crossing a page
7013 * boundary or if there is an access handler installed for any of it. Because
7014 * of lock prefix guarantees, we're in for some extra clutter when this
7015 * happens.
7016 *
7017 * This may raise a \#GP, \#SS, \#PF or \#AC.
7018 *
7019 * @returns Pointer to the mapped memory.
7020 *
7021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7022 * @param bUnmapInfo Where to return unmap info to be passed to
7023 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
7024 * iemMemCommitAndUnmapWoSafeJmp,
7025 * iemMemCommitAndUnmapRoSafeJmp,
7026 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
7027 * when done.
7028 * @param cbMem The number of bytes to map. This is usually 1,
7029 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7030 * string operations it can be up to a page.
7031 * @param iSegReg The index of the segment register to use for
7032 * this access. The base and limits are checked.
7033 * Use UINT8_MAX to indicate that no segmentation
7034 * is required (for IDT, GDT and LDT accesses).
7035 * @param GCPtrMem The address of the guest memory.
7036 * @param fAccess How the memory is being accessed. The
7037 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
7038 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
7039 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
7040 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
7041 * set.
7042 * @param uAlignCtl Alignment control:
7043 * - Bits 15:0 is the alignment mask.
7044 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
7045 * IEM_MEMMAP_F_ALIGN_SSE, and
7046 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
7047 * Pass zero to skip alignment.
7048 * @tparam a_fSafeCall Whether this is a call from a "safe" fallback function in
7049 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
7050 * needs counting as such in the statistics.
7051 */
7052template<bool a_fSafeCall = false>
7053static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7054 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7055{
7056 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
7057
7058 /*
7059 * Check the input, check segment access and adjust address
7060 * with segment base.
7061 */
7062 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7063 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
7064 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7065
7066 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7067 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7068 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7069
7070 /*
7071 * Alignment check.
7072 */
7073 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
7074 { /* likelyish */ }
7075 else
7076 {
7077 /* Misaligned access. */
7078 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7079 {
7080 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
7081 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
7082 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
7083 {
7084 AssertCompile(X86_CR0_AM == X86_EFL_AC);
7085
7086 if (iemMemAreAlignmentChecksEnabled(pVCpu))
7087 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7088 }
7089 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
7090 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
7091 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
7092 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
7093 * that's what FXSAVE does on a 10980xe. */
7094 && iemMemAreAlignmentChecksEnabled(pVCpu))
7095 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7096 else
7097 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
7098 }
7099
7100#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
7101 /* If the access is atomic there are host platform alignment restrictions
7102 we need to conform with. */
7103 if ( !(fAccess & IEM_ACCESS_ATOMIC)
7104# if defined(RT_ARCH_AMD64)
7105 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
7106# elif defined(RT_ARCH_ARM64)
7107 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
7108# else
7109# error port me
7110# endif
7111 )
7112 { /* okay */ }
7113 else
7114 {
7115 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
7116 pVCpu->iem.s.cMisalignedAtomics += 1;
7117 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
7118 }
7119#endif
7120 }
7121
7122 /*
7123 * Figure out which mapping entry to use.
7124 */
7125 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
7126 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7127 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7128 {
7129 iMemMap = iemMemMapFindFree(pVCpu);
7130 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
7131 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
7132 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
7133 pVCpu->iem.s.aMemMappings[2].fAccess),
7134 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
7135 }
7136
7137 /*
7138 * Crossing a page boundary?
7139 */
7140 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
7141 { /* No (likely). */ }
7142 else
7143 {
7144 void *pvMem;
7145 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
7146 if (rcStrict == VINF_SUCCESS)
7147 return pvMem;
7148 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7149 }
7150
7151#ifdef IEM_WITH_DATA_TLB
7152 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
7153
7154 /*
7155 * Get the TLB entry for this page checking that it has the A & D bits
7156 * set as per fAccess flags.
7157 */
7158 /** @todo make the caller pass these in with fAccess. */
7159 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
7160 ? IEMTLBE_F_PT_NO_USER : 0;
7161 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
7162 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
7163 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
7164 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7165 ? IEMTLBE_F_PT_NO_WRITE : 0)
7166 : 0;
7167 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
7168 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
7169 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
7170 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
7171 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
7172 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
7173 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
7174 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
7175 {
7176# ifdef IEM_WITH_TLB_STATISTICS
7177 if (a_fSafeCall)
7178 pVCpu->iem.s.DataTlb.cTlbSafeHits++;
7179 else
7180 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
7181# endif
7182 }
7183 else
7184 {
7185 if (a_fSafeCall)
7186 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
7187 else
7188 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
7189
7190 /* This page table walking will set A and D bits as required by the
7191 access while performing the walk.
7192 ASSUMES these are set when the address is translated rather than on commit... */
7193 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7194 PGMPTWALKFAST WalkFast;
7195 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7196 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7197 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7198 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7199 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7200 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7201 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7202 fQPage |= PGMQPAGE_F_USER_MODE;
7203 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7204 if (RT_SUCCESS(rc))
7205 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7206 else
7207 {
7208 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7209# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7210 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
7211 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7212# endif
7213 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7214 }
7215
7216 uint32_t fDataBps;
7217 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7218 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7219 {
7220 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7221 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7222 {
7223 pTlbe--;
7224 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7225 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7226 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7227 }
7228 else
7229 {
7230 if (a_fSafeCall)
7231 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
7232 else
7233 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7234 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7235 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7236 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7237 }
7238 }
7239 else
7240 {
7241 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7242 to the page with the data access breakpoint armed on it to pass thru here. */
7243 if (fDataBps > 1)
7244 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7245 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7246 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7247 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7248 pTlbe->uTag = uTagNoRev;
7249 }
7250 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
7251 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
7252 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7253 pTlbe->GCPhys = GCPhysPg;
7254 pTlbe->pbMappingR3 = NULL;
7255 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7256 Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
7257 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
7258
7259 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
7260 {
7261 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
7262 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
7263 else
7264 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
7265 }
7266
7267 /* Resolve the physical address. */
7268 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7269 uint8_t *pbMemFullLoad = NULL;
7270 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7271 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
7272 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7273# ifdef IN_RING3
7274 pTlbe->pbMappingR3 = pbMemFullLoad;
7275# endif
7276 }
7277
7278 /*
7279 * Check the flags and physical revision.
7280 * Note! This will revalidate the uTlbPhysRev after a full load. This is
7281 * just to keep the code structure simple (i.e. avoid gotos or similar).
7282 */
7283 uint8_t *pbMem;
7284 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
7285 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7286# ifdef IN_RING3
7287 pbMem = pTlbe->pbMappingR3;
7288# else
7289 pbMem = NULL;
7290# endif
7291 else
7292 {
7293 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7294
7295 /*
7296 * Okay, something isn't quite right or needs refreshing.
7297 */
7298 /* Write to read only memory? */
7299 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
7300 {
7301 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7302# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7303/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
7304 * to trigger an \#PG or a VM nested paging exit here yet! */
7305 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7306 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7307# endif
7308 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7309 }
7310
7311 /* Kernel memory accessed by userland? */
7312 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
7313 {
7314 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7315# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7316/** @todo TLB: See above. */
7317 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7318 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7319# endif
7320 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7321 }
7322
7323 /*
7324 * Check if the physical page info needs updating.
7325 */
7326 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7327# ifdef IN_RING3
7328 pbMem = pTlbe->pbMappingR3;
7329# else
7330 pbMem = NULL;
7331# endif
7332 else
7333 {
7334 pTlbe->pbMappingR3 = NULL;
7335 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7336 pbMem = NULL;
7337 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7338 &pbMem, &pTlbe->fFlagsAndPhysRev);
7339 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7340# ifdef IN_RING3
7341 pTlbe->pbMappingR3 = pbMem;
7342# endif
7343 }
7344
7345 /*
7346 * Check the physical page level access and mapping.
7347 */
7348 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
7349 { /* probably likely */ }
7350 else
7351 {
7352 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
7353 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7354 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7355 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7356 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7357 if (rcStrict == VINF_SUCCESS)
7358 return pbMem;
7359 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7360 }
7361 }
7362 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7363
7364 if (pbMem)
7365 {
7366 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7367 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7368 fAccess |= IEM_ACCESS_NOT_LOCKED;
7369 }
7370 else
7371 {
7372 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7373 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7374 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7375 if (rcStrict == VINF_SUCCESS)
7376 {
7377 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7378 return pbMem;
7379 }
7380 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7381 }
7382
7383 void * const pvMem = pbMem;
7384
7385 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7386 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7387 if (fAccess & IEM_ACCESS_TYPE_READ)
7388 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7389
7390#else /* !IEM_WITH_DATA_TLB */
7391
7392
7393 RTGCPHYS GCPhysFirst;
7394 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7395 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7396 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7397
7398 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7399 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7400 if (fAccess & IEM_ACCESS_TYPE_READ)
7401 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7402
7403 void *pvMem;
7404 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7405 if (rcStrict == VINF_SUCCESS)
7406 { /* likely */ }
7407 else
7408 {
7409 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7410 if (rcStrict == VINF_SUCCESS)
7411 return pvMem;
7412 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7413 }
7414
7415#endif /* !IEM_WITH_DATA_TLB */
7416
7417 /*
7418 * Fill in the mapping table entry.
7419 */
7420 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7421 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7422 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7423 pVCpu->iem.s.cActiveMappings++;
7424
7425 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
7426
7427 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7428 return pvMem;
7429}
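
/*
 * Illustrative sketch of the unmap info byte produced above and consumed by
 * iemMemCommitAndUnmapJmp and friends further down: bits 0..2 hold the mapping
 * table slot, bit 3 marks the info as valid, and the high nibble carries the
 * IEM_ACCESS_TYPE_* bits.  A caller-side decode would look roughly like this
 * (the local names are for the example only):
 *
 *      uintptr_t const iMemMap  = bUnmapInfo & 0x7;            // mapping table slot
 *      bool const      fValid   = RT_BOOL(bUnmapInfo & 0x08);  // set by iemMemMap/iemMemMapJmp
 *      uint32_t const  fAccType = (uint32_t)bUnmapInfo >> 4;   // IEM_ACCESS_TYPE_READ/WRITE/...
 */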
7430
7431
7432/** @see iemMemMapJmp */
7433static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7434 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7435{
7436 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
7437}
7438
7439
7440/**
7441 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
7442 *
7443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7444 * @param   bUnmapInfo  Unmap info returned by the iemMemMap*Jmp call that
7445 *                      established the mapping to commit and unmap.
7446 */
7447void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7448{
7449 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7450 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7451 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7452 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7453 == ((unsigned)bUnmapInfo >> 4),
7454 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7455
7456 /* If it's bounce buffered, we may need to write back the buffer. */
7457 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7458 {
7459 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7460 {
7461 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7462 if (rcStrict == VINF_SUCCESS)
7463 return;
7464 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7465 }
7466 }
7467 /* Otherwise unlock it. */
7468 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7469 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7470
7471 /* Free the entry. */
7472 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7473 Assert(pVCpu->iem.s.cActiveMappings != 0);
7474 pVCpu->iem.s.cActiveMappings--;
7475}
7476
7477
7478/** Fallback for iemMemCommitAndUnmapRwJmp. */
7479void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7480{
7481 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7482 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7483}
7484
7485
7486/** Fallback for iemMemCommitAndUnmapAtJmp. */
7487void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7488{
7489 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7490 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7491}
7492
7493
7494/** Fallback for iemMemCommitAndUnmapWoJmp. */
7495void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7496{
7497 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7498 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7499}
7500
7501
7502/** Fallback for iemMemCommitAndUnmapRoJmp. */
7503void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7504{
7505 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7506 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7507}
7508
7509
7510/** Fallback for iemMemRollbackAndUnmapWo. */
7511void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7512{
7513 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7514 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7515}
7516
7517#endif /* IEM_WITH_SETJMP */
7518
7519#ifndef IN_RING3
7520/**
7521 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7522 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
7523 *
7524 * Allows the instruction to be completed and retired, while the IEM user will
7525 * return to ring-3 immediately afterwards and do the postponed writes there.
7526 *
7527 * @returns VBox status code (no strict statuses). Caller must check
7528 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7530 * @param   bUnmapInfo  Unmap info returned by the iemMemMap call that
7531 *                      established the mapping to commit and unmap.
7532 */
7533VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7534{
7535 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7536 AssertMsgReturn( (bUnmapInfo & 0x08)
7537 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7538 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7539 == ((unsigned)bUnmapInfo >> 4),
7540 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7541 VERR_NOT_FOUND);
7542
7543 /* If it's bounce buffered, we may need to write back the buffer. */
7544 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7545 {
7546 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7547 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7548 }
7549 /* Otherwise unlock it. */
7550 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7551 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7552
7553 /* Free the entry. */
7554 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7555 Assert(pVCpu->iem.s.cActiveMappings != 0);
7556 pVCpu->iem.s.cActiveMappings--;
7557 return VINF_SUCCESS;
7558}
7559#endif
7560
7561
7562/**
7563 * Rolls back mappings, releasing page locks and such.
7564 *
7565 * The caller shall only call this after checking cActiveMappings.
7566 *
7567 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7568 */
7569void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7570{
7571 Assert(pVCpu->iem.s.cActiveMappings > 0);
7572
7573 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7574 while (iMemMap-- > 0)
7575 {
7576 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7577 if (fAccess != IEM_ACCESS_INVALID)
7578 {
7579 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7580 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7581 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7582 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7583 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7584 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7585 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7586 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7587 pVCpu->iem.s.cActiveMappings--;
7588 }
7589 }
7590}
7591
7592
7593/*
7594 * Instantiate R/W templates.
7595 */
7596#define TMPL_MEM_WITH_STACK
7597
7598#define TMPL_MEM_TYPE uint8_t
7599#define TMPL_MEM_FN_SUFF U8
7600#define TMPL_MEM_FMT_TYPE "%#04x"
7601#define TMPL_MEM_FMT_DESC "byte"
7602#include "IEMAllMemRWTmpl.cpp.h"
7603
7604#define TMPL_MEM_TYPE uint16_t
7605#define TMPL_MEM_FN_SUFF U16
7606#define TMPL_MEM_FMT_TYPE "%#06x"
7607#define TMPL_MEM_FMT_DESC "word"
7608#include "IEMAllMemRWTmpl.cpp.h"
7609
7610#define TMPL_WITH_PUSH_SREG
7611#define TMPL_MEM_TYPE uint32_t
7612#define TMPL_MEM_FN_SUFF U32
7613#define TMPL_MEM_FMT_TYPE "%#010x"
7614#define TMPL_MEM_FMT_DESC "dword"
7615#include "IEMAllMemRWTmpl.cpp.h"
7616#undef TMPL_WITH_PUSH_SREG
7617
7618#define TMPL_MEM_TYPE uint64_t
7619#define TMPL_MEM_FN_SUFF U64
7620#define TMPL_MEM_FMT_TYPE "%#018RX64"
7621#define TMPL_MEM_FMT_DESC "qword"
7622#include "IEMAllMemRWTmpl.cpp.h"
7623
7624#undef TMPL_MEM_WITH_STACK
7625
7626#define TMPL_MEM_TYPE uint32_t
7627#define TMPL_MEM_TYPE_ALIGN 0
7628#define TMPL_MEM_FN_SUFF U32NoAc
7629#define TMPL_MEM_FMT_TYPE "%#010x"
7630#define TMPL_MEM_FMT_DESC "dword"
7631#include "IEMAllMemRWTmpl.cpp.h"
7632#undef TMPL_WITH_PUSH_SREG
7633
7634#define TMPL_MEM_TYPE uint64_t
7635#define TMPL_MEM_TYPE_ALIGN 0
7636#define TMPL_MEM_FN_SUFF U64NoAc
7637#define TMPL_MEM_FMT_TYPE "%#018RX64"
7638#define TMPL_MEM_FMT_DESC "qword"
7639#include "IEMAllMemRWTmpl.cpp.h"
7640
7641#define TMPL_MEM_TYPE uint64_t
7642#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7643#define TMPL_MEM_FN_SUFF U64AlignedU128
7644#define TMPL_MEM_FMT_TYPE "%#018RX64"
7645#define TMPL_MEM_FMT_DESC "qword"
7646#include "IEMAllMemRWTmpl.cpp.h"
7647
7648/* See IEMAllMemRWTmplInline.cpp.h */
7649#define TMPL_MEM_BY_REF
7650
7651#define TMPL_MEM_TYPE RTFLOAT80U
7652#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7653#define TMPL_MEM_FN_SUFF R80
7654#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7655#define TMPL_MEM_FMT_DESC "tword"
7656#include "IEMAllMemRWTmpl.cpp.h"
7657
7658#define TMPL_MEM_TYPE RTPBCD80U
7659#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7660#define TMPL_MEM_FN_SUFF D80
7661#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7662#define TMPL_MEM_FMT_DESC "tword"
7663#include "IEMAllMemRWTmpl.cpp.h"
7664
7665#define TMPL_MEM_TYPE RTUINT128U
7666#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7667#define TMPL_MEM_FN_SUFF U128
7668#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7669#define TMPL_MEM_FMT_DESC "dqword"
7670#include "IEMAllMemRWTmpl.cpp.h"
7671
7672#define TMPL_MEM_TYPE RTUINT128U
7673#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7674#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7675#define TMPL_MEM_FN_SUFF U128AlignedSse
7676#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7677#define TMPL_MEM_FMT_DESC "dqword"
7678#include "IEMAllMemRWTmpl.cpp.h"
7679
7680#define TMPL_MEM_TYPE RTUINT128U
7681#define TMPL_MEM_TYPE_ALIGN 0
7682#define TMPL_MEM_FN_SUFF U128NoAc
7683#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7684#define TMPL_MEM_FMT_DESC "dqword"
7685#include "IEMAllMemRWTmpl.cpp.h"
7686
7687#define TMPL_MEM_TYPE RTUINT256U
7688#define TMPL_MEM_TYPE_ALIGN 0
7689#define TMPL_MEM_FN_SUFF U256NoAc
7690#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7691#define TMPL_MEM_FMT_DESC "qqword"
7692#include "IEMAllMemRWTmpl.cpp.h"
7693
7694#define TMPL_MEM_TYPE RTUINT256U
7695#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7696#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7697#define TMPL_MEM_FN_SUFF U256AlignedAvx
7698#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7699#define TMPL_MEM_FMT_DESC "qqword"
7700#include "IEMAllMemRWTmpl.cpp.h"
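
/*
 * Each of the instantiations above expands IEMAllMemRWTmpl.cpp.h for one
 * TMPL_MEM_TYPE, producing the iemMemFetchDataXxx and iemMemStoreDataXxx
 * worker family for that width (plus stack helpers where TMPL_MEM_WITH_STACK
 * is defined).  As a rough sketch of the shape of the generated workers,
 * inferred from how they are called later in this file rather than quoted
 * from the template itself:
 *
 *      VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
 *      VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT;
 */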
7701
7702/**
7703 * Fetches a data dword and zero extends it to a qword.
7704 *
7705 * @returns Strict VBox status code.
7706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7707 * @param pu64Dst Where to return the qword.
7708 * @param iSegReg The index of the segment register to use for
7709 * this access. The base and limits are checked.
7710 * @param GCPtrMem The address of the guest memory.
7711 */
7712VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7713{
7714 /* The lazy approach for now... */
7715 uint8_t bUnmapInfo;
7716 uint32_t const *pu32Src;
7717 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7718 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7719 if (rc == VINF_SUCCESS)
7720 {
7721 *pu64Dst = *pu32Src;
7722 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7723 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7724 }
7725 return rc;
7726}
7727
7728
7729#ifdef SOME_UNUSED_FUNCTION
7730/**
7731 * Fetches a data dword and sign extends it to a qword.
7732 *
7733 * @returns Strict VBox status code.
7734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7735 * @param pu64Dst Where to return the sign extended value.
7736 * @param iSegReg The index of the segment register to use for
7737 * this access. The base and limits are checked.
7738 * @param GCPtrMem The address of the guest memory.
7739 */
7740VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7741{
7742 /* The lazy approach for now... */
7743 uint8_t bUnmapInfo;
7744 int32_t const *pi32Src;
7745 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7746 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7747 if (rc == VINF_SUCCESS)
7748 {
7749 *pu64Dst = *pi32Src;
7750 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7751 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7752 }
7753#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7754 else
7755 *pu64Dst = 0;
7756#endif
7757 return rc;
7758}
7759#endif
7760
7761
7762/**
7763 * Fetches a descriptor register (lgdt, lidt).
7764 *
7765 * @returns Strict VBox status code.
7766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7767 * @param pcbLimit Where to return the limit.
7768 * @param pGCPtrBase Where to return the base.
7769 * @param iSegReg The index of the segment register to use for
7770 * this access. The base and limits are checked.
7771 * @param GCPtrMem The address of the guest memory.
7772 * @param enmOpSize The effective operand size.
7773 */
7774VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7775 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7776{
7777 /*
7778 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7779 * little special:
7780 * - The two reads are done separately.
7781 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7782 * - We suspect the 386 to actually commit the limit before the base in
7783 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7784 * don't try to emulate this eccentric behavior, because it's not well
7785 * enough understood and rather hard to trigger.
7786 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7787 */
7788 VBOXSTRICTRC rcStrict;
7789 if (IEM_IS_64BIT_CODE(pVCpu))
7790 {
7791 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7792 if (rcStrict == VINF_SUCCESS)
7793 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7794 }
7795 else
7796 {
7797 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
7798 if (enmOpSize == IEMMODE_32BIT)
7799 {
7800 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7801 {
7802 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7803 if (rcStrict == VINF_SUCCESS)
7804 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7805 }
7806 else
7807 {
7808 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7809 if (rcStrict == VINF_SUCCESS)
7810 {
7811 *pcbLimit = (uint16_t)uTmp;
7812 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7813 }
7814 }
7815 if (rcStrict == VINF_SUCCESS)
7816 *pGCPtrBase = uTmp;
7817 }
7818 else
7819 {
7820 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7821 if (rcStrict == VINF_SUCCESS)
7822 {
7823 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7824 if (rcStrict == VINF_SUCCESS)
7825 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7826 }
7827 }
7828 }
7829 return rcStrict;
7830}
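
/*
 * Worked example for the 16-bit operand size path above: given the six bytes
 * FF FF 34 12 56 78 at GCPtrMem, the limit reads as 0xFFFF and the base dword
 * as 0x78561234, which the UINT32_C(0x00ffffff) mask reduces to 0x00561234;
 * the top base byte is discarded, as expected for 16-bit LGDT/LIDT.
 */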
7831
7832
7833/**
7834 * Stores a data dqword, SSE aligned.
7835 *
7836 * @returns Strict VBox status code.
7837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7838 * @param iSegReg The index of the segment register to use for
7839 * this access. The base and limits are checked.
7840 * @param GCPtrMem The address of the guest memory.
7841 * @param u128Value The value to store.
7842 */
7843VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7844{
7845 /* The lazy approach for now... */
7846 uint8_t bUnmapInfo;
7847 PRTUINT128U pu128Dst;
7848 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7849 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7850 if (rc == VINF_SUCCESS)
7851 {
7852 pu128Dst->au64[0] = u128Value.au64[0];
7853 pu128Dst->au64[1] = u128Value.au64[1];
7854 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7855 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7856 }
7857 return rc;
7858}
7859
7860
7861#ifdef IEM_WITH_SETJMP
7862/**
7863 * Stores a data dqword, SSE aligned.
7864 *
7865 * @returns Strict VBox status code.
7866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7867 * @param iSegReg The index of the segment register to use for
7868 * this access. The base and limits are checked.
7869 * @param GCPtrMem The address of the guest memory.
7870 * @param u128Value The value to store.
7871 */
7872void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7873 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7874{
7875 /* The lazy approach for now... */
7876 uint8_t bUnmapInfo;
7877 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7878 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7879 pu128Dst->au64[0] = u128Value.au64[0];
7880 pu128Dst->au64[1] = u128Value.au64[1];
7881 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7882 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7883}
7884#endif
7885
7886
7887/**
7888 * Stores a data qqword.
7889 *
7890 * @returns Strict VBox status code.
7891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7892 * @param iSegReg The index of the segment register to use for
7893 * this access. The base and limits are checked.
7894 * @param GCPtrMem The address of the guest memory.
7895 * @param pu256Value Pointer to the value to store.
7896 */
7897VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7898{
7899 /* The lazy approach for now... */
7900 uint8_t bUnmapInfo;
7901 PRTUINT256U pu256Dst;
7902 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7903 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7904 if (rc == VINF_SUCCESS)
7905 {
7906 pu256Dst->au64[0] = pu256Value->au64[0];
7907 pu256Dst->au64[1] = pu256Value->au64[1];
7908 pu256Dst->au64[2] = pu256Value->au64[2];
7909 pu256Dst->au64[3] = pu256Value->au64[3];
7910 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7911 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7912 }
7913 return rc;
7914}
7915
7916
7917#ifdef IEM_WITH_SETJMP
7918/**
7919 * Stores a data qqword, longjmp on error.
7920 *
7921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7922 * @param iSegReg The index of the segment register to use for
7923 * this access. The base and limits are checked.
7924 * @param GCPtrMem The address of the guest memory.
7925 * @param pu256Value Pointer to the value to store.
7926 */
7927void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7928{
7929 /* The lazy approach for now... */
7930 uint8_t bUnmapInfo;
7931 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7932 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7933 pu256Dst->au64[0] = pu256Value->au64[0];
7934 pu256Dst->au64[1] = pu256Value->au64[1];
7935 pu256Dst->au64[2] = pu256Value->au64[2];
7936 pu256Dst->au64[3] = pu256Value->au64[3];
7937 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7938 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7939}
7940#endif
7941
7942
7943/**
7944 * Stores a descriptor register (sgdt, sidt).
7945 *
7946 * @returns Strict VBox status code.
7947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7948 * @param cbLimit The limit.
7949 * @param GCPtrBase The base address.
7950 * @param iSegReg The index of the segment register to use for
7951 * this access. The base and limits are checked.
7952 * @param GCPtrMem The address of the guest memory.
7953 */
7954VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7955{
7956 /*
7957 * The SIDT and SGDT instructions actually store the data using two
7958 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7959 * do not respond to operand size prefixes.
7960 */
7961 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7962 if (rcStrict == VINF_SUCCESS)
7963 {
7964 if (IEM_IS_16BIT_CODE(pVCpu))
7965 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7966 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7967 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7968 else if (IEM_IS_32BIT_CODE(pVCpu))
7969 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7970 else
7971 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7972 }
7973 return rcStrict;
7974}
7975
7976
7977/**
7978 * Begin a special stack push (used by interrupts, exceptions and such).
7979 *
7980 * This will raise \#SS or \#PF if appropriate.
7981 *
7982 * @returns Strict VBox status code.
7983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7984 * @param cbMem The number of bytes to push onto the stack.
7985 * @param cbAlign The alignment mask (7, 3, 1).
7986 * @param ppvMem Where to return the pointer to the stack memory.
7987 * As with the other memory functions this could be
7988 * direct access or bounce buffered access, so
7989 *                      don't commit the register until the commit call
7990 * succeeds.
7991 * @param pbUnmapInfo Where to store unmap info for
7992 * iemMemStackPushCommitSpecial.
7993 * @param puNewRsp Where to return the new RSP value. This must be
7994 * passed unchanged to
7995 * iemMemStackPushCommitSpecial().
7996 */
7997VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7998 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7999{
8000 Assert(cbMem < UINT8_MAX);
8001 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8002 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
8003}
8004
8005
8006/**
8007 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8008 *
8009 * This will update the rSP.
8010 *
8011 * @returns Strict VBox status code.
8012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8013 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
8014 * @param uNewRsp The new RSP value returned by
8015 * iemMemStackPushBeginSpecial().
8016 */
8017VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
8018{
8019 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8020 if (rcStrict == VINF_SUCCESS)
8021 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8022 return rcStrict;
8023}
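
/*
 * Illustrative sketch of the special push protocol (the local names are made
 * up for the example): begin the push, fill in the mapped memory, then commit,
 * which also updates RSP.
 *
 *      uint8_t      bUnmapInfo;
 *      uint64_t     uNewRsp;
 *      uint64_t    *pu64Frame;  // example: pushing a single qword
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, (void **)&pu64Frame, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu64Frame = uValueToPush;  // hypothetical value to push
 *          rcStrict   = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
 *      }
 */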
8024
8025
8026/**
8027 * Begin a special stack pop (used by iret, retf and such).
8028 *
8029 * This will raise \#SS or \#PF if appropriate.
8030 *
8031 * @returns Strict VBox status code.
8032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8033 * @param cbMem The number of bytes to pop from the stack.
8034 * @param cbAlign The alignment mask (7, 3, 1).
8035 * @param ppvMem Where to return the pointer to the stack memory.
8036 * @param pbUnmapInfo Where to store unmap info for
8037 * iemMemStackPopDoneSpecial.
8038 * @param puNewRsp Where to return the new RSP value. This must be
8039 * assigned to CPUMCTX::rsp manually some time
8040 * after iemMemStackPopDoneSpecial() has been
8041 * called.
8042 */
8043VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8044 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
8045{
8046 Assert(cbMem < UINT8_MAX);
8047 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8048 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8049}
8050
8051
8052/**
8053 * Continue a special stack pop (used by iret and retf), for the purpose of
8054 * retrieving a new stack pointer.
8055 *
8056 * This will raise \#SS or \#PF if appropriate.
8057 *
8058 * @returns Strict VBox status code.
8059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8060 * @param off Offset from the top of the stack. This is zero
8061 * except in the retf case.
8062 * @param cbMem The number of bytes to pop from the stack.
8063 * @param ppvMem Where to return the pointer to the stack memory.
8064 * @param pbUnmapInfo Where to store unmap info for
8065 * iemMemStackPopDoneSpecial.
8066 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8067 * return this because all use of this function is
8068 * to retrieve a new value and anything we return
8069 * here would be discarded.)
8070 */
8071VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8072 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
8073{
8074 Assert(cbMem < UINT8_MAX);
8075
8076    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8077 RTGCPTR GCPtrTop;
8078 if (IEM_IS_64BIT_CODE(pVCpu))
8079 GCPtrTop = uCurNewRsp;
8080 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8081 GCPtrTop = (uint32_t)uCurNewRsp;
8082 else
8083 GCPtrTop = (uint16_t)uCurNewRsp;
8084
8085 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8086 0 /* checked in iemMemStackPopBeginSpecial */);
8087}
8088
8089
8090/**
8091 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8092 * iemMemStackPopContinueSpecial).
8093 *
8094 * The caller will manually commit the rSP.
8095 *
8096 * @returns Strict VBox status code.
8097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8098 * @param bUnmapInfo Unmap information returned by
8099 * iemMemStackPopBeginSpecial() or
8100 * iemMemStackPopContinueSpecial().
8101 */
8102VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
8103{
8104 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8105}
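
/*
 * Illustrative sketch of the special pop protocol (the local names are made up
 * for the example): begin the pop, read the mapped data, signal done, and only
 * then assign the new RSP by hand as the documentation above requires.
 *
 *      uint8_t          bUnmapInfo;
 *      uint64_t         uNewRsp;
 *      uint64_t const  *pu64Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, (void const **)&pu64Frame, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint64_t const uPopped = *pu64Frame;    // consume the data
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp;   // manual RSP commit
 *      }
 */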
8106
8107
8108/**
8109 * Fetches a system table byte.
8110 *
8111 * @returns Strict VBox status code.
8112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8113 * @param pbDst Where to return the byte.
8114 * @param iSegReg The index of the segment register to use for
8115 * this access. The base and limits are checked.
8116 * @param GCPtrMem The address of the guest memory.
8117 */
8118VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8119{
8120 /* The lazy approach for now... */
8121 uint8_t bUnmapInfo;
8122 uint8_t const *pbSrc;
8123 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8124 if (rc == VINF_SUCCESS)
8125 {
8126 *pbDst = *pbSrc;
8127 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8128 }
8129 return rc;
8130}
8131
8132
8133/**
8134 * Fetches a system table word.
8135 *
8136 * @returns Strict VBox status code.
8137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8138 * @param pu16Dst Where to return the word.
8139 * @param iSegReg The index of the segment register to use for
8140 * this access. The base and limits are checked.
8141 * @param GCPtrMem The address of the guest memory.
8142 */
8143VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8144{
8145 /* The lazy approach for now... */
8146 uint8_t bUnmapInfo;
8147 uint16_t const *pu16Src;
8148 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8149 if (rc == VINF_SUCCESS)
8150 {
8151 *pu16Dst = *pu16Src;
8152 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8153 }
8154 return rc;
8155}
8156
8157
8158/**
8159 * Fetches a system table dword.
8160 *
8161 * @returns Strict VBox status code.
8162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8163 * @param pu32Dst Where to return the dword.
8164 * @param iSegReg The index of the segment register to use for
8165 * this access. The base and limits are checked.
8166 * @param GCPtrMem The address of the guest memory.
8167 */
8168VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8169{
8170 /* The lazy approach for now... */
8171 uint8_t bUnmapInfo;
8172 uint32_t const *pu32Src;
8173 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8174 if (rc == VINF_SUCCESS)
8175 {
8176 *pu32Dst = *pu32Src;
8177 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8178 }
8179 return rc;
8180}
8181
8182
8183/**
8184 * Fetches a system table qword.
8185 *
8186 * @returns Strict VBox status code.
8187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8188 * @param pu64Dst Where to return the qword.
8189 * @param iSegReg The index of the segment register to use for
8190 * this access. The base and limits are checked.
8191 * @param GCPtrMem The address of the guest memory.
8192 */
8193VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8194{
8195 /* The lazy approach for now... */
8196 uint8_t bUnmapInfo;
8197 uint64_t const *pu64Src;
8198 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8199 if (rc == VINF_SUCCESS)
8200 {
8201 *pu64Dst = *pu64Src;
8202 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8203 }
8204 return rc;
8205}
8206
8207
8208/**
8209 * Fetches a descriptor table entry with caller specified error code.
8210 *
8211 * @returns Strict VBox status code.
8212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8213 * @param pDesc Where to return the descriptor table entry.
8214 * @param uSel The selector which table entry to fetch.
8215 * @param uXcpt The exception to raise on table lookup error.
8216 * @param uErrorCode The error code associated with the exception.
8217 */
8218static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8219 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8220{
8221 AssertPtr(pDesc);
8222 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8223
8224 /** @todo did the 286 require all 8 bytes to be accessible? */
8225 /*
8226 * Get the selector table base and check bounds.
8227 */
8228 RTGCPTR GCPtrBase;
8229 if (uSel & X86_SEL_LDT)
8230 {
8231 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8232 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8233 {
8234 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8235 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8236 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8237 uErrorCode, 0);
8238 }
8239
8240 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8241 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8242 }
8243 else
8244 {
8245 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8246 {
8247 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8248 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8249 uErrorCode, 0);
8250 }
8251 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8252 }
8253
8254 /*
8255 * Read the legacy descriptor and maybe the long mode extensions if
8256 * required.
8257 */
8258 VBOXSTRICTRC rcStrict;
8259 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8260 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8261 else
8262 {
8263 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8264 if (rcStrict == VINF_SUCCESS)
8265 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8266 if (rcStrict == VINF_SUCCESS)
8267 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8268 if (rcStrict == VINF_SUCCESS)
8269 pDesc->Legacy.au16[3] = 0;
8270 else
8271 return rcStrict;
8272 }
8273
8274 if (rcStrict == VINF_SUCCESS)
8275 {
8276 if ( !IEM_IS_LONG_MODE(pVCpu)
8277 || pDesc->Legacy.Gen.u1DescType)
8278 pDesc->Long.au64[1] = 0;
8279 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8280 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8281 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8282 else
8283 {
8284 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8285 /** @todo is this the right exception? */
8286 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8287 }
8288 }
8289 return rcStrict;
8290}
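
/*
 * Worked example for the selector arithmetic above: uSel = 0x002b has RPL=3 in
 * the low two bits and the TI bit clear, so the GDT is used; masking off the
 * RPL/TI bits leaves byte offset 0x28, i.e. descriptor table entry number 5.
 * The bounds check ORs X86_SEL_RPL_LDT into the selector so that the last byte
 * of the 8-byte entry must also lie within the table limit before fetching.
 */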
8291
8292
8293/**
8294 * Fetches a descriptor table entry.
8295 *
8296 * @returns Strict VBox status code.
8297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8298 * @param pDesc Where to return the descriptor table entry.
8299 * @param uSel The selector which table entry to fetch.
8300 * @param uXcpt The exception to raise on table lookup error.
8301 */
8302VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8303{
8304 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8305}
8306
8307
8308/**
8309 * Marks the selector descriptor as accessed (only non-system descriptors).
8310 *
8311 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8312 * will therefore skip the limit checks.
8313 *
8314 * @returns Strict VBox status code.
8315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8316 * @param uSel The selector.
8317 */
8318VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8319{
8320 /*
8321 * Get the selector table base and calculate the entry address.
8322 */
8323 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8324 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8325 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8326 GCPtr += uSel & X86_SEL_MASK;
8327
8328 /*
8329 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8330 * ugly stuff to avoid this.  This will make sure the access is atomic and
8331 * more or less remove any question about 8-bit or 32-bit accesses.
8332 */
8333 VBOXSTRICTRC rcStrict;
8334 uint8_t bUnmapInfo;
8335 uint32_t volatile *pu32;
8336 if ((GCPtr & 3) == 0)
8337 {
8338        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8339 GCPtr += 2 + 2;
8340 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8341 if (rcStrict != VINF_SUCCESS)
8342 return rcStrict;
8343        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8344 }
8345 else
8346 {
8347 /* The misaligned GDT/LDT case, map the whole thing. */
8348 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8349 if (rcStrict != VINF_SUCCESS)
8350 return rcStrict;
8351 switch ((uintptr_t)pu32 & 3)
8352 {
8353 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8354 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8355 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8356 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8357 }
8358 }
8359
8360 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8361}
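
/*
 * Worked example for the bit fiddling above: the accessed bit is bit 0 of the
 * type byte, i.e. bit 40 of the 8-byte descriptor (byte 5, bit 0).  The aligned
 * path maps the dword at GCPtr+4, where that bit becomes 40 - 32 = 8, matching
 * ASMAtomicBitSet(pu32, 8).  The misaligned paths re-base the atomic dword
 * access onto an aligned host address and compensate in the bit index; for
 * instance, when ((uintptr_t)pu32 & 3) == 1 the aligned pointer is pu32 plus 3
 * bytes and the bit index is 40 - 3*8 = 16, which is still byte 5, bit 0.
 */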
8362
8363
8364#undef LOG_GROUP
8365#define LOG_GROUP LOG_GROUP_IEM
8366
8367/** @} */
8368
8369/** @name Opcode Helpers.
8370 * @{
8371 */
8372
8373/**
8374 * Calculates the effective address of a ModR/M memory operand.
8375 *
8376 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8377 *
8378 * @return Strict VBox status code.
8379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8380 * @param bRm The ModRM byte.
8381 * @param cbImmAndRspOffset - First byte: The size of any immediate
8382 * following the effective address opcode bytes
8383 * (only for RIP relative addressing).
8384 * - Second byte: RSP displacement (for POP [ESP]).
8385 * @param pGCPtrEff Where to return the effective address.
8386 */
8387VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8388{
8389 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8390# define SET_SS_DEF() \
8391 do \
8392 { \
8393 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8394 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8395 } while (0)
8396
8397 if (!IEM_IS_64BIT_CODE(pVCpu))
8398 {
8399/** @todo Check the effective address size crap! */
8400 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8401 {
8402 uint16_t u16EffAddr;
8403
8404 /* Handle the disp16 form with no registers first. */
8405 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8406 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8407 else
8408 {
8409                /* Get the displacement. */
8410 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8411 {
8412 case 0: u16EffAddr = 0; break;
8413 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8414 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8415 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8416 }
8417
8418 /* Add the base and index registers to the disp. */
8419 switch (bRm & X86_MODRM_RM_MASK)
8420 {
8421 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8422 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8423 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8424 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8425 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8426 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8427 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8428 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8429 }
8430 }
8431
8432 *pGCPtrEff = u16EffAddr;
8433 }
8434 else
8435 {
8436 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8437 uint32_t u32EffAddr;
8438
8439 /* Handle the disp32 form with no registers first. */
8440 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8441 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8442 else
8443 {
8444 /* Get the register (or SIB) value. */
8445 switch ((bRm & X86_MODRM_RM_MASK))
8446 {
8447 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8448 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8449 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8450 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8451 case 4: /* SIB */
8452 {
8453 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8454
8455 /* Get the index and scale it. */
8456 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8457 {
8458 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8459 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8460 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8461 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8462 case 4: u32EffAddr = 0; /*none */ break;
8463 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8464 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8465 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8466 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8467 }
8468 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8469
8470 /* add base */
8471 switch (bSib & X86_SIB_BASE_MASK)
8472 {
8473 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8474 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8475 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8476 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8477 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8478 case 5:
8479 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8480 {
8481 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8482 SET_SS_DEF();
8483 }
8484 else
8485 {
8486 uint32_t u32Disp;
8487 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8488 u32EffAddr += u32Disp;
8489 }
8490 break;
8491 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8492 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8493 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8494 }
8495 break;
8496 }
8497 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8498 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8499 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8500 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8501 }
8502
8503 /* Get and add the displacement. */
8504 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8505 {
8506 case 0:
8507 break;
8508 case 1:
8509 {
8510 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8511 u32EffAddr += i8Disp;
8512 break;
8513 }
8514 case 2:
8515 {
8516 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8517 u32EffAddr += u32Disp;
8518 break;
8519 }
8520 default:
8521 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8522 }
8523
8524 }
8525 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8526 *pGCPtrEff = u32EffAddr;
8527 }
8528 }
8529 else
8530 {
8531 uint64_t u64EffAddr;
8532
8533 /* Handle the rip+disp32 form with no registers first. */
8534 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8535 {
8536 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8537 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8538 }
8539 else
8540 {
8541 /* Get the register (or SIB) value. */
8542 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8543 {
8544 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8545 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8546 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8547 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8548 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8549 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8550 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8551 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8552 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8553 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8554 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8555 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8556 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8557 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8558 /* SIB */
8559 case 4:
8560 case 12:
8561 {
8562 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8563
8564 /* Get the index and scale it. */
8565 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8566 {
8567 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8568 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8569 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8570 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8571 case 4: u64EffAddr = 0; /*none */ break;
8572 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8573 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8574 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8575 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8576 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8577 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8578 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8579 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8580 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8581 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8582 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8583 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8584 }
8585 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8586
8587 /* add base */
8588 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8589 {
8590 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8591 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8592 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8593 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8594 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8595 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8596 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8597 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8598 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8599 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8600 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8601 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8602 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8603 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8604 /* complicated encodings */
8605 case 5:
8606 case 13:
8607 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8608 {
8609 if (!pVCpu->iem.s.uRexB)
8610 {
8611 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8612 SET_SS_DEF();
8613 }
8614 else
8615 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8616 }
8617 else
8618 {
8619 uint32_t u32Disp;
8620 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8621 u64EffAddr += (int32_t)u32Disp;
8622 }
8623 break;
8624 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8625 }
8626 break;
8627 }
8628 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8629 }
8630
8631 /* Get and add the displacement. */
8632 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8633 {
8634 case 0:
8635 break;
8636 case 1:
8637 {
8638 int8_t i8Disp;
8639 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8640 u64EffAddr += i8Disp;
8641 break;
8642 }
8643 case 2:
8644 {
8645 uint32_t u32Disp;
8646 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8647 u64EffAddr += (int32_t)u32Disp;
8648 break;
8649 }
8650 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8651 }
8652
8653 }
8654
8655 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8656 *pGCPtrEff = u64EffAddr;
8657 else
8658 {
8659 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8660 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8661 }
8662 }
8663
8664 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8665 return VINF_SUCCESS;
8666}
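
/*
 * Worked example for the 32-bit decoding above: bRm = 0x44 has mod=01 and
 * rm=100, so a SIB byte and a disp8 follow.  With bSib = 0x24 (scale=00,
 * index=100 meaning none, base=100 meaning ESP) and disp8 = 0x10, the result
 * is ESP + 0x10 (plus any RSP displacement passed in cbImmAndRspOffset), and
 * SET_SS_DEF() makes SS the default segment; the typical [esp+10h] operand.
 */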
8667
8668
8669#ifdef IEM_WITH_SETJMP
8670/**
8671 * Calculates the effective address of a ModR/M memory operand.
8672 *
8673 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8674 *
8675 * May longjmp on internal error.
8676 *
8677 * @return The effective address.
8678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8679 * @param bRm The ModRM byte.
8680 * @param cbImmAndRspOffset - First byte: The size of any immediate
8681 * following the effective address opcode bytes
8682 * (only for RIP relative addressing).
8683 * - Second byte: RSP displacement (for POP [ESP]).
8684 */
8685RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8686{
8687 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8688# define SET_SS_DEF() \
8689 do \
8690 { \
8691 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8692 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8693 } while (0)
8694
8695 if (!IEM_IS_64BIT_CODE(pVCpu))
8696 {
8697/** @todo Check the effective address size crap! */
8698 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8699 {
8700 uint16_t u16EffAddr;
8701
8702 /* Handle the disp16 form with no registers first. */
8703 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8704 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8705 else
8706 {
8707                /* Get the displacement. */
8708 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8709 {
8710 case 0: u16EffAddr = 0; break;
8711 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8712 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8713 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8714 }
8715
8716 /* Add the base and index registers to the disp. */
8717 switch (bRm & X86_MODRM_RM_MASK)
8718 {
8719 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8720 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8721 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8722 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8723 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8724 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8725 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8726 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8727 }
8728 }
8729
8730 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8731 return u16EffAddr;
8732 }
8733
8734 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8735 uint32_t u32EffAddr;
8736
8737 /* Handle the disp32 form with no registers first. */
8738 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8739 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8740 else
8741 {
8742 /* Get the register (or SIB) value. */
8743 switch ((bRm & X86_MODRM_RM_MASK))
8744 {
8745 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8746 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8747 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8748 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8749 case 4: /* SIB */
8750 {
8751 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8752
8753 /* Get the index and scale it. */
8754 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8755 {
8756 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8757 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8758 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8759 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8760 case 4: u32EffAddr = 0; /*none */ break;
8761 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8762 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8763 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8764 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8765 }
8766 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8767
8768 /* add base */
8769 switch (bSib & X86_SIB_BASE_MASK)
8770 {
8771 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8772 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8773 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8774 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8775 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8776 case 5:
8777 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8778 {
8779 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8780 SET_SS_DEF();
8781 }
8782 else
8783 {
8784 uint32_t u32Disp;
8785 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8786 u32EffAddr += u32Disp;
8787 }
8788 break;
8789 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8790 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8791 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8792 }
8793 break;
8794 }
8795 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8796 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8797 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8798 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8799 }
8800
8801 /* Get and add the displacement. */
8802 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8803 {
8804 case 0:
8805 break;
8806 case 1:
8807 {
8808 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8809 u32EffAddr += i8Disp;
8810 break;
8811 }
8812 case 2:
8813 {
8814 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8815 u32EffAddr += u32Disp;
8816 break;
8817 }
8818 default:
8819 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8820 }
8821 }
8822
8823 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8824 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8825 return u32EffAddr;
8826 }
8827
8828 uint64_t u64EffAddr;
8829
8830 /* Handle the rip+disp32 form with no registers first. */
8831 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8832 {
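        /* The disp32 fetched here is relative to the address of the next instruction.
           IEM_GET_INSTR_LEN covers the opcode bytes fetched so far, so the size of any
           trailing immediate (not yet fetched at this point) has to be supplied by the
           caller in the low byte of cbImmAndRspOffset. */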
8833 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8834 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8835 }
8836 else
8837 {
8838 /* Get the register (or SIB) value. */
8839 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8840 {
8841 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8842 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8843 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8844 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8845 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8846 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8847 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8848 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8849 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8850 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8851 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8852 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8853 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8854 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8855 /* SIB */
8856 case 4:
8857 case 12:
8858 {
8859 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8860
8861 /* Get the index and scale it. */
8862 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8863 {
8864 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8865 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8866 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8867 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8868 case 4: u64EffAddr = 0; /*none */ break;
8869 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8870 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8871 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8872 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8873 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8874 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8875 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8876 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8877 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8878 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8879 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8880 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8881 }
8882 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8883
8884 /* add base */
8885 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8886 {
8887 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8888 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8889 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8890 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8891 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8892 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8893 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8894 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8895 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8896 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8897 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8898 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8899 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8900 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8901 /* complicated encodings */
8902 case 5:
8903 case 13:
8904 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8905 {
8906 if (!pVCpu->iem.s.uRexB)
8907 {
8908 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8909 SET_SS_DEF();
8910 }
8911 else
8912 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8913 }
8914 else
8915 {
8916 uint32_t u32Disp;
8917 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8918 u64EffAddr += (int32_t)u32Disp;
8919 }
8920 break;
8921 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8922 }
8923 break;
8924 }
8925 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8926 }
8927
8928 /* Get and add the displacement. */
8929 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8930 {
8931 case 0:
8932 break;
8933 case 1:
8934 {
8935 int8_t i8Disp;
8936 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8937 u64EffAddr += i8Disp;
8938 break;
8939 }
8940 case 2:
8941 {
8942 uint32_t u32Disp;
8943 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8944 u64EffAddr += (int32_t)u32Disp;
8945 break;
8946 }
8947 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8948 }
8949
8950 }
8951
8952 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8953 {
8954 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8955 return u64EffAddr;
8956 }
8957 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8958 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8959 return u64EffAddr & UINT32_MAX;
8960}
8961#endif /* IEM_WITH_SETJMP */
8962
8963
8964/**
8965 * Calculates the effective address of a ModR/M memory operand, extended version
8966 * for use in the recompilers.
8967 *
8968 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8969 *
8970 * @return Strict VBox status code.
8971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8972 * @param bRm The ModRM byte.
8973 * @param cbImmAndRspOffset - First byte: The size of any immediate
8974 * following the effective address opcode bytes
8975 * (only for RIP relative addressing).
8976 * - Second byte: RSP displacement (for POP [ESP]).
8977 * @param pGCPtrEff Where to return the effective address.
8978 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8979 * SIB byte (bits 39:32).
8980 */
8981VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8982{
8983    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8984# define SET_SS_DEF() \
8985 do \
8986 { \
8987 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8988 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8989 } while (0)
8990
8991 uint64_t uInfo;
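    /* uInfo returns the raw 32-bit displacement in bits 31:0 and the SIB byte (when
       present) in bits 39:32.  For illustration: a [eax+ecx*4+0x10] operand (bSib=0x88,
       disp8=0x10) would come back as uInfo=0x0000008800000010. */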
8992 if (!IEM_IS_64BIT_CODE(pVCpu))
8993 {
8994/** @todo Check the effective address size crap! */
8995 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8996 {
8997 uint16_t u16EffAddr;
8998
8999 /* Handle the disp16 form with no registers first. */
9000 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9001 {
9002 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9003 uInfo = u16EffAddr;
9004 }
9005 else
9006 {
9007                /* Get the displacement. */
9008 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9009 {
9010 case 0: u16EffAddr = 0; break;
9011 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9012 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9013 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9014 }
9015 uInfo = u16EffAddr;
9016
9017 /* Add the base and index registers to the disp. */
9018 switch (bRm & X86_MODRM_RM_MASK)
9019 {
9020 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9021 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9022 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9023 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9024 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9025 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9026 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9027 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9028 }
9029 }
9030
9031 *pGCPtrEff = u16EffAddr;
9032 }
9033 else
9034 {
9035 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9036 uint32_t u32EffAddr;
9037
9038 /* Handle the disp32 form with no registers first. */
9039 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9040 {
9041 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9042 uInfo = u32EffAddr;
9043 }
9044 else
9045 {
9046 /* Get the register (or SIB) value. */
9047 uInfo = 0;
9048 switch ((bRm & X86_MODRM_RM_MASK))
9049 {
9050 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9051 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9052 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9053 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9054 case 4: /* SIB */
9055 {
9056 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9057 uInfo = (uint64_t)bSib << 32;
9058
9059 /* Get the index and scale it. */
9060 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9061 {
9062 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9063 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9064 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9065 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9066 case 4: u32EffAddr = 0; /*none */ break;
9067 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9068 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9069 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9070 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9071 }
9072 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9073
9074 /* add base */
9075 switch (bSib & X86_SIB_BASE_MASK)
9076 {
9077 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9078 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9079 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9080 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9081 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9082 case 5:
9083 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9084 {
9085 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9086 SET_SS_DEF();
9087 }
9088 else
9089 {
9090 uint32_t u32Disp;
9091 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9092 u32EffAddr += u32Disp;
9093 uInfo |= u32Disp;
9094 }
9095 break;
9096 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9097 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9098 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9099 }
9100 break;
9101 }
9102 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9103 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9104 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9105 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9106 }
9107
9108 /* Get and add the displacement. */
9109 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9110 {
9111 case 0:
9112 break;
9113 case 1:
9114 {
9115 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9116 u32EffAddr += i8Disp;
9117 uInfo |= (uint32_t)(int32_t)i8Disp;
9118 break;
9119 }
9120 case 2:
9121 {
9122 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9123 u32EffAddr += u32Disp;
9124 uInfo |= (uint32_t)u32Disp;
9125 break;
9126 }
9127 default:
9128 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9129 }
9130
9131 }
9132 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9133 *pGCPtrEff = u32EffAddr;
9134 }
9135 }
9136 else
9137 {
9138 uint64_t u64EffAddr;
9139
9140 /* Handle the rip+disp32 form with no registers first. */
9141 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9142 {
9143 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9144 uInfo = (uint32_t)u64EffAddr;
9145 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9146 }
9147 else
9148 {
9149 /* Get the register (or SIB) value. */
9150 uInfo = 0;
9151 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9152 {
9153 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9154 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9155 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9156 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9157 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9158 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9159 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9160 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9161 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9162 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9163 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9164 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9165 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9166 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9167 /* SIB */
9168 case 4:
9169 case 12:
9170 {
9171 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9172 uInfo = (uint64_t)bSib << 32;
9173
9174 /* Get the index and scale it. */
9175 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9176 {
9177 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9178 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9179 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9180 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9181 case 4: u64EffAddr = 0; /*none */ break;
9182 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9183 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9184 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9185 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9186 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9187 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9188 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9189 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9190 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9191 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9192 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9193 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9194 }
9195 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9196
9197 /* add base */
9198 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9199 {
9200 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9201 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9202 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9203 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9204 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9205 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9206 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9207 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9208 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9209 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9210 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9211 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9212 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9213 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9214 /* complicated encodings */
9215 case 5:
9216 case 13:
9217 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9218 {
9219 if (!pVCpu->iem.s.uRexB)
9220 {
9221 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9222 SET_SS_DEF();
9223 }
9224 else
9225 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9226 }
9227 else
9228 {
9229 uint32_t u32Disp;
9230 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9231 u64EffAddr += (int32_t)u32Disp;
9232 uInfo |= u32Disp;
9233 }
9234 break;
9235 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9236 }
9237 break;
9238 }
9239 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9240 }
9241
9242 /* Get and add the displacement. */
9243 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9244 {
9245 case 0:
9246 break;
9247 case 1:
9248 {
9249 int8_t i8Disp;
9250 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9251 u64EffAddr += i8Disp;
9252 uInfo |= (uint32_t)(int32_t)i8Disp;
9253 break;
9254 }
9255 case 2:
9256 {
9257 uint32_t u32Disp;
9258 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9259 u64EffAddr += (int32_t)u32Disp;
9260 uInfo |= u32Disp;
9261 break;
9262 }
9263 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9264 }
9265
9266 }
9267
9268 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9269 *pGCPtrEff = u64EffAddr;
9270 else
9271 {
9272 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9273 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9274 }
9275 }
9276 *puInfo = uInfo;
9277
9278 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9279 return VINF_SUCCESS;
9280}
9281
9282/** @} */
9283
9284
9285#ifdef LOG_ENABLED
9286/**
9287 * Logs the current instruction.
9288 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9289 * @param fSameCtx Set if we have the same context information as the VMM,
9290 * clear if we may have already executed an instruction in
9291 * our debug context. When clear, we assume IEMCPU holds
9292 * valid CPU mode info.
9293 *
9294 * The @a fSameCtx parameter is now misleading and obsolete.
9295 * @param pszFunction The IEM function doing the execution.
9296 */
9297static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9298{
9299# ifdef IN_RING3
9300 if (LogIs2Enabled())
9301 {
9302 char szInstr[256];
9303 uint32_t cbInstr = 0;
9304 if (fSameCtx)
9305 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9306 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9307 szInstr, sizeof(szInstr), &cbInstr);
9308 else
9309 {
9310 uint32_t fFlags = 0;
9311 switch (IEM_GET_CPU_MODE(pVCpu))
9312 {
9313 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9314 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9315 case IEMMODE_16BIT:
9316 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9317 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9318 else
9319 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9320 break;
9321 }
9322 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9323 szInstr, sizeof(szInstr), &cbInstr);
9324 }
9325
9326 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9327 Log2(("**** %s fExec=%x\n"
9328 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9329 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9330 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9331 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9332 " %s\n"
9333 , pszFunction, pVCpu->iem.s.fExec,
9334 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9335 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9336 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9337 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9338 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9339 szInstr));
9340
9341 /* This stuff sucks atm. as it fills the log with MSRs. */
9342 //if (LogIs3Enabled())
9343 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9344 }
9345 else
9346# endif
9347 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9348 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9349 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9350}
9351#endif /* LOG_ENABLED */
9352
9353
9354#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9355/**
9356 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9357 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9358 *
9359 * @returns Modified rcStrict.
9360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9361 * @param rcStrict The instruction execution status.
9362 */
9363static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9364{
9365 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9366 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9367 {
9368 /* VMX preemption timer takes priority over NMI-window exits. */
9369 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9370 {
9371 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9372 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9373 }
9374 /*
9375 * Check remaining intercepts.
9376 *
9377 * NMI-window and Interrupt-window VM-exits.
9378 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9379 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9380 *
9381 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9382 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9383 */
9384 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9385 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9386 && !TRPMHasTrap(pVCpu))
9387 {
9388 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9389 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9390 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9391 {
9392 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9393 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9394 }
9395 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9396 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9397 {
9398 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9399 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9400 }
9401 }
9402 }
9403 /* TPR-below threshold/APIC write has the highest priority. */
9404 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9405 {
9406 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9407 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9408 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9409 }
9410 /* MTF takes priority over VMX-preemption timer. */
9411 else
9412 {
9413 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9414 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9415 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9416 }
9417 return rcStrict;
9418}
9419#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9420
9421
9422/**
9423 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9424 * IEMExecOneWithPrefetchedByPC.
9425 *
9426 * Similar code is found in IEMExecLots.
9427 *
9428 * @return Strict VBox status code.
9429 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9430 * @param fExecuteInhibit If set, execute the instruction following CLI,
9431 * POP SS and MOV SS,GR.
9432 * @param pszFunction The calling function name.
9433 */
9434DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9435{
9436 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9437 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9438 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9439 RT_NOREF_PV(pszFunction);
9440
9441#ifdef IEM_WITH_SETJMP
9442 VBOXSTRICTRC rcStrict;
9443 IEM_TRY_SETJMP(pVCpu, rcStrict)
9444 {
9445 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9446 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9447 }
9448 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9449 {
9450 pVCpu->iem.s.cLongJumps++;
9451 }
9452 IEM_CATCH_LONGJMP_END(pVCpu);
9453#else
9454 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9455 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9456#endif
9457 if (rcStrict == VINF_SUCCESS)
9458 pVCpu->iem.s.cInstructions++;
9459 if (pVCpu->iem.s.cActiveMappings > 0)
9460 {
9461 Assert(rcStrict != VINF_SUCCESS);
9462 iemMemRollback(pVCpu);
9463 }
9464 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9465 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9466 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9467
9468//#ifdef DEBUG
9469// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9470//#endif
9471
9472#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9473 /*
9474 * Perform any VMX nested-guest instruction boundary actions.
9475 *
9476 * If any of these causes a VM-exit, we must skip executing the next
9477 * instruction (would run into stale page tables). A VM-exit makes sure
9478     * there is no interrupt-inhibition, so that should ensure we don't go on
9479     * to try executing the next instruction.  Clearing fExecuteInhibit is
9480 * problematic because of the setjmp/longjmp clobbering above.
9481 */
9482 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9483 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9484 || rcStrict != VINF_SUCCESS)
9485 { /* likely */ }
9486 else
9487 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9488#endif
9489
9490 /* Execute the next instruction as well if a cli, pop ss or
9491 mov ss, Gr has just completed successfully. */
9492 if ( fExecuteInhibit
9493 && rcStrict == VINF_SUCCESS
9494 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9495 {
9496 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9497 if (rcStrict == VINF_SUCCESS)
9498 {
9499#ifdef LOG_ENABLED
9500 iemLogCurInstr(pVCpu, false, pszFunction);
9501#endif
9502#ifdef IEM_WITH_SETJMP
9503 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9504 {
9505 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9506 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9507 }
9508 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9509 {
9510 pVCpu->iem.s.cLongJumps++;
9511 }
9512 IEM_CATCH_LONGJMP_END(pVCpu);
9513#else
9514 IEM_OPCODE_GET_FIRST_U8(&b);
9515 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9516#endif
9517 if (rcStrict == VINF_SUCCESS)
9518 {
9519 pVCpu->iem.s.cInstructions++;
9520#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9521 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9522 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9523 { /* likely */ }
9524 else
9525 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9526#endif
9527 }
9528 if (pVCpu->iem.s.cActiveMappings > 0)
9529 {
9530 Assert(rcStrict != VINF_SUCCESS);
9531 iemMemRollback(pVCpu);
9532 }
9533 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9534 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9535 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9536 }
9537 else if (pVCpu->iem.s.cActiveMappings > 0)
9538 iemMemRollback(pVCpu);
9539 /** @todo drop this after we bake this change into RIP advancing. */
9540 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9541 }
9542
9543 /*
9544 * Return value fiddling, statistics and sanity assertions.
9545 */
9546 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9547
9548 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9549 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9550 return rcStrict;
9551}
9552
9553
9554/**
9555 * Execute one instruction.
9556 *
9557 * @return Strict VBox status code.
9558 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9559 */
9560VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9561{
9562    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9563#ifdef LOG_ENABLED
9564 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9565#endif
9566
9567 /*
9568 * Do the decoding and emulation.
9569 */
9570 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9571 if (rcStrict == VINF_SUCCESS)
9572 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9573 else if (pVCpu->iem.s.cActiveMappings > 0)
9574 iemMemRollback(pVCpu);
9575
9576 if (rcStrict != VINF_SUCCESS)
9577 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9578 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9579 return rcStrict;
9580}
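/* Hypothetical usage sketch (illustration only, not taken from this file): a caller
   such as EM's interpretation path would typically just do
       VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
   treat VINF_SUCCESS as one fully emulated guest instruction, and feed any other
   strict status code back into its own execution loop. */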
9581
9582
9583VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9584{
9585 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9586 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9587 if (rcStrict == VINF_SUCCESS)
9588 {
9589 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9590 if (pcbWritten)
9591 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9592 }
9593 else if (pVCpu->iem.s.cActiveMappings > 0)
9594 iemMemRollback(pVCpu);
9595
9596 return rcStrict;
9597}
9598
9599
9600VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9601 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9602{
9603 VBOXSTRICTRC rcStrict;
9604 if ( cbOpcodeBytes
9605 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9606 {
9607 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9608#ifdef IEM_WITH_CODE_TLB
9609 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9610 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9611 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9612 pVCpu->iem.s.offCurInstrStart = 0;
9613 pVCpu->iem.s.offInstrNextByte = 0;
9614 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9615#else
9616 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9617 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9618#endif
9619 rcStrict = VINF_SUCCESS;
9620 }
9621 else
9622 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9623 if (rcStrict == VINF_SUCCESS)
9624 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9625 else if (pVCpu->iem.s.cActiveMappings > 0)
9626 iemMemRollback(pVCpu);
9627
9628 return rcStrict;
9629}
9630
9631
9632VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9633{
9634 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9635 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9636 if (rcStrict == VINF_SUCCESS)
9637 {
9638 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9639 if (pcbWritten)
9640 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9641 }
9642 else if (pVCpu->iem.s.cActiveMappings > 0)
9643 iemMemRollback(pVCpu);
9644
9645 return rcStrict;
9646}
9647
9648
9649VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9650 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9651{
9652 VBOXSTRICTRC rcStrict;
9653 if ( cbOpcodeBytes
9654 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9655 {
9656 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9657#ifdef IEM_WITH_CODE_TLB
9658 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9659 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9660 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9661 pVCpu->iem.s.offCurInstrStart = 0;
9662 pVCpu->iem.s.offInstrNextByte = 0;
9663 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9664#else
9665 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9666 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9667#endif
9668 rcStrict = VINF_SUCCESS;
9669 }
9670 else
9671 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9672 if (rcStrict == VINF_SUCCESS)
9673 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9674 else if (pVCpu->iem.s.cActiveMappings > 0)
9675 iemMemRollback(pVCpu);
9676
9677 return rcStrict;
9678}
9679
9680
9681/**
9682 * For handling split cacheline lock operations when the host has split-lock
9683 * detection enabled.
9684 *
9685 * This will cause the interpreter to disregard the lock prefix and implicit
9686 * locking (xchg).
9687 *
9688 * @returns Strict VBox status code.
9689 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9690 */
9691VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9692{
9693 /*
9694 * Do the decoding and emulation.
9695 */
9696 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9697 if (rcStrict == VINF_SUCCESS)
9698 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9699 else if (pVCpu->iem.s.cActiveMappings > 0)
9700 iemMemRollback(pVCpu);
9701
9702 if (rcStrict != VINF_SUCCESS)
9703 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9704 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9705 return rcStrict;
9706}
9707
9708
9709/**
9710 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9711 * inject a pending TRPM trap.
9712 */
9713VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9714{
9715 Assert(TRPMHasTrap(pVCpu));
9716
9717 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9718 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9719 {
9720 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9721#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9722 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9723 if (fIntrEnabled)
9724 {
9725 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9726 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9727 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9728 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9729 else
9730 {
9731 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9732 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9733 }
9734 }
9735#else
9736 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9737#endif
9738 if (fIntrEnabled)
9739 {
9740 uint8_t u8TrapNo;
9741 TRPMEVENT enmType;
9742 uint32_t uErrCode;
9743 RTGCPTR uCr2;
9744 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9745 AssertRC(rc2);
9746 Assert(enmType == TRPM_HARDWARE_INT);
9747 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9748
9749 TRPMResetTrap(pVCpu);
9750
9751#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9752 /* Injecting an event may cause a VM-exit. */
9753 if ( rcStrict != VINF_SUCCESS
9754 && rcStrict != VINF_IEM_RAISED_XCPT)
9755 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9756#else
9757 NOREF(rcStrict);
9758#endif
9759 }
9760 }
9761
9762 return VINF_SUCCESS;
9763}
9764
9765
9766VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9767{
9768 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9769 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
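    /* cPollRate is used as a mask below: timers are only polled when the remaining
       instruction counter ANDed with it is zero, i.e. roughly every cPollRate + 1
       instructions.  E.g. (illustrative value) cPollRate=511 polls about every 512
       instructions. */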
9770 Assert(cMaxInstructions > 0);
9771
9772 /*
9773 * See if there is an interrupt pending in TRPM, inject it if we can.
9774 */
9775 /** @todo What if we are injecting an exception and not an interrupt? Is that
9776 * possible here? For now we assert it is indeed only an interrupt. */
9777 if (!TRPMHasTrap(pVCpu))
9778 { /* likely */ }
9779 else
9780 {
9781 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9782 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9783 { /*likely */ }
9784 else
9785 return rcStrict;
9786 }
9787
9788 /*
9789 * Initial decoder init w/ prefetch, then setup setjmp.
9790 */
9791 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9792 if (rcStrict == VINF_SUCCESS)
9793 {
9794#ifdef IEM_WITH_SETJMP
9795 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9796 IEM_TRY_SETJMP(pVCpu, rcStrict)
9797#endif
9798 {
9799 /*
9800             * The run loop, limited to the caller specified instruction count (cMaxInstructions).
9801 */
9802 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9803 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9804 for (;;)
9805 {
9806 /*
9807 * Log the state.
9808 */
9809#ifdef LOG_ENABLED
9810 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9811#endif
9812
9813 /*
9814 * Do the decoding and emulation.
9815 */
9816 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9817 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9818#ifdef VBOX_STRICT
9819 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9820#endif
9821 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9822 {
9823 Assert(pVCpu->iem.s.cActiveMappings == 0);
9824 pVCpu->iem.s.cInstructions++;
9825
9826#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9827 /* Perform any VMX nested-guest instruction boundary actions. */
9828 uint64_t fCpu = pVCpu->fLocalForcedActions;
9829 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9830 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9831 { /* likely */ }
9832 else
9833 {
9834 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9835 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9836 fCpu = pVCpu->fLocalForcedActions;
9837 else
9838 {
9839 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9840 break;
9841 }
9842 }
9843#endif
9844 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9845 {
9846#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9847 uint64_t fCpu = pVCpu->fLocalForcedActions;
9848#endif
9849 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9850 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9851 | VMCPU_FF_TLB_FLUSH
9852 | VMCPU_FF_UNHALT );
9853
9854 if (RT_LIKELY( ( !fCpu
9855 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9856 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9857 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9858 {
9859 if (--cMaxInstructionsGccStupidity > 0)
9860 {
9861                         /* Poll timers every now and then according to the caller's specs. */
9862 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9863 || !TMTimerPollBool(pVM, pVCpu))
9864 {
9865 Assert(pVCpu->iem.s.cActiveMappings == 0);
9866 iemReInitDecoder(pVCpu);
9867 continue;
9868 }
9869 }
9870 }
9871 }
9872 Assert(pVCpu->iem.s.cActiveMappings == 0);
9873 }
9874 else if (pVCpu->iem.s.cActiveMappings > 0)
9875 iemMemRollback(pVCpu);
9876 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9877 break;
9878 }
9879 }
9880#ifdef IEM_WITH_SETJMP
9881 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9882 {
9883 if (pVCpu->iem.s.cActiveMappings > 0)
9884 iemMemRollback(pVCpu);
9885# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9886 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9887# endif
9888 pVCpu->iem.s.cLongJumps++;
9889 }
9890 IEM_CATCH_LONGJMP_END(pVCpu);
9891#endif
9892
9893 /*
9894 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9895 */
9896 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9897 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9898 }
9899 else
9900 {
9901 if (pVCpu->iem.s.cActiveMappings > 0)
9902 iemMemRollback(pVCpu);
9903
9904#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9905 /*
9906 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9907 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9908 */
9909 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9910#endif
9911 }
9912
9913 /*
9914 * Maybe re-enter raw-mode and log.
9915 */
9916 if (rcStrict != VINF_SUCCESS)
9917 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9918 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9919 if (pcInstructions)
9920 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9921 return rcStrict;
9922}
9923
9924
9925/**
9926 * Interface used by EMExecuteExec, does exit statistics and limits.
9927 *
9928 * @returns Strict VBox status code.
9929 * @param pVCpu The cross context virtual CPU structure.
9930 * @param fWillExit To be defined.
9931 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9932 * @param cMaxInstructions Maximum number of instructions to execute.
9933 * @param cMaxInstructionsWithoutExits
9934 * The max number of instructions without exits.
9935 * @param pStats Where to return statistics.
9936 */
9937VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9938 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9939{
9940 NOREF(fWillExit); /** @todo define flexible exit crits */
9941
9942 /*
9943 * Initialize return stats.
9944 */
9945 pStats->cInstructions = 0;
9946 pStats->cExits = 0;
9947 pStats->cMaxExitDistance = 0;
9948 pStats->cReserved = 0;
9949
9950 /*
9951 * Initial decoder init w/ prefetch, then setup setjmp.
9952 */
9953 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9954 if (rcStrict == VINF_SUCCESS)
9955 {
9956#ifdef IEM_WITH_SETJMP
9957 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9958 IEM_TRY_SETJMP(pVCpu, rcStrict)
9959#endif
9960 {
9961#ifdef IN_RING0
9962 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9963#endif
9964 uint32_t cInstructionSinceLastExit = 0;
9965
9966 /*
9967             * The run loop, limited to the caller specified instruction count (cMaxInstructions).
9968 */
9969 PVM pVM = pVCpu->CTX_SUFF(pVM);
9970 for (;;)
9971 {
9972 /*
9973 * Log the state.
9974 */
9975#ifdef LOG_ENABLED
9976 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9977#endif
9978
9979 /*
9980 * Do the decoding and emulation.
9981 */
9982 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9983
9984 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9985 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9986
9987 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9988 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9989 {
9990 pStats->cExits += 1;
9991 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9992 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9993 cInstructionSinceLastExit = 0;
9994 }
9995
9996 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9997 {
9998 Assert(pVCpu->iem.s.cActiveMappings == 0);
9999 pVCpu->iem.s.cInstructions++;
10000 pStats->cInstructions++;
10001 cInstructionSinceLastExit++;
10002
10003#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10004 /* Perform any VMX nested-guest instruction boundary actions. */
10005 uint64_t fCpu = pVCpu->fLocalForcedActions;
10006 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10007 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10008 { /* likely */ }
10009 else
10010 {
10011 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10012 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10013 fCpu = pVCpu->fLocalForcedActions;
10014 else
10015 {
10016 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10017 break;
10018 }
10019 }
10020#endif
10021 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10022 {
10023#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10024 uint64_t fCpu = pVCpu->fLocalForcedActions;
10025#endif
10026 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10027 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10028 | VMCPU_FF_TLB_FLUSH
10029 | VMCPU_FF_UNHALT );
10030 if (RT_LIKELY( ( ( !fCpu
10031 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10032 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10033 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10034 || pStats->cInstructions < cMinInstructions))
10035 {
10036 if (pStats->cInstructions < cMaxInstructions)
10037 {
10038 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10039 {
10040#ifdef IN_RING0
10041 if ( !fCheckPreemptionPending
10042 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10043#endif
10044 {
10045 Assert(pVCpu->iem.s.cActiveMappings == 0);
10046 iemReInitDecoder(pVCpu);
10047 continue;
10048 }
10049#ifdef IN_RING0
10050 rcStrict = VINF_EM_RAW_INTERRUPT;
10051 break;
10052#endif
10053 }
10054 }
10055 }
10056 Assert(!(fCpu & VMCPU_FF_IEM));
10057 }
10058 Assert(pVCpu->iem.s.cActiveMappings == 0);
10059 }
10060 else if (pVCpu->iem.s.cActiveMappings > 0)
10061 iemMemRollback(pVCpu);
10062 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10063 break;
10064 }
10065 }
10066#ifdef IEM_WITH_SETJMP
10067 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10068 {
10069 if (pVCpu->iem.s.cActiveMappings > 0)
10070 iemMemRollback(pVCpu);
10071 pVCpu->iem.s.cLongJumps++;
10072 }
10073 IEM_CATCH_LONGJMP_END(pVCpu);
10074#endif
10075
10076 /*
10077 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10078 */
10079 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10081 }
10082 else
10083 {
10084 if (pVCpu->iem.s.cActiveMappings > 0)
10085 iemMemRollback(pVCpu);
10086
10087#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10088 /*
10089 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10090 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10091 */
10092 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10093#endif
10094 }
10095
10096 /*
10097 * Maybe re-enter raw-mode and log.
10098 */
10099 if (rcStrict != VINF_SUCCESS)
10100 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10101 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10102 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10103 return rcStrict;
10104}
10105
10106
10107/**
10108 * Injects a trap, fault, abort, software interrupt or external interrupt.
10109 *
10110 * The parameter list matches TRPMQueryTrapAll pretty closely.
10111 *
10112 * @returns Strict VBox status code.
10113 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10114 * @param u8TrapNo The trap number.
10115 * @param enmType What type is it (trap/fault/abort), software
10116 * interrupt or hardware interrupt.
10117 * @param uErrCode The error code if applicable.
10118 * @param uCr2 The CR2 value if applicable.
10119 * @param cbInstr The instruction length (only relevant for
10120 * software interrupts).
10121 */
10122VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10123 uint8_t cbInstr)
10124{
10125 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10126#ifdef DBGFTRACE_ENABLED
10127 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10128 u8TrapNo, enmType, uErrCode, uCr2);
10129#endif
10130
10131 uint32_t fFlags;
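    /* The mapping below is mechanical; e.g. a TRPM_TRAP with u8TrapNo=X86_XCPT_PF ends
       up as IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2 | IEM_XCPT_FLAGS_ERR, while
       TRPM_HARDWARE_INT only sets IEM_XCPT_FLAGS_T_EXT_INT and zeroes uErrCode/uCr2. */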
10132 switch (enmType)
10133 {
10134 case TRPM_HARDWARE_INT:
10135 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10136 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10137 uErrCode = uCr2 = 0;
10138 break;
10139
10140 case TRPM_SOFTWARE_INT:
10141 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10142 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10143 uErrCode = uCr2 = 0;
10144 break;
10145
10146 case TRPM_TRAP:
10147 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
10148 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10149 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10150 if (u8TrapNo == X86_XCPT_PF)
10151 fFlags |= IEM_XCPT_FLAGS_CR2;
10152 switch (u8TrapNo)
10153 {
10154 case X86_XCPT_DF:
10155 case X86_XCPT_TS:
10156 case X86_XCPT_NP:
10157 case X86_XCPT_SS:
10158 case X86_XCPT_PF:
10159 case X86_XCPT_AC:
10160 case X86_XCPT_GP:
10161 fFlags |= IEM_XCPT_FLAGS_ERR;
10162 break;
10163 }
10164 break;
10165
10166 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10167 }
10168
10169 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10170
10171 if (pVCpu->iem.s.cActiveMappings > 0)
10172 iemMemRollback(pVCpu);
10173
10174 return rcStrict;
10175}
10176
10177
10178/**
10179 * Injects the active TRPM event.
10180 *
10181 * @returns Strict VBox status code.
10182 * @param pVCpu The cross context virtual CPU structure.
10183 */
10184VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10185{
10186#ifndef IEM_IMPLEMENTS_TASKSWITCH
10187 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10188#else
10189 uint8_t u8TrapNo;
10190 TRPMEVENT enmType;
10191 uint32_t uErrCode;
10192 RTGCUINTPTR uCr2;
10193 uint8_t cbInstr;
10194 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10195 if (RT_FAILURE(rc))
10196 return rc;
10197
10198 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10199 * ICEBP \#DB injection as a special case. */
10200 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10201#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10202 if (rcStrict == VINF_SVM_VMEXIT)
10203 rcStrict = VINF_SUCCESS;
10204#endif
10205#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10206 if (rcStrict == VINF_VMX_VMEXIT)
10207 rcStrict = VINF_SUCCESS;
10208#endif
10209 /** @todo Are there any other codes that imply the event was successfully
10210 * delivered to the guest? See @bugref{6607}. */
10211 if ( rcStrict == VINF_SUCCESS
10212 || rcStrict == VINF_IEM_RAISED_XCPT)
10213 TRPMResetTrap(pVCpu);
10214
10215 return rcStrict;
10216#endif
10217}
10218
10219
10220VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10221{
10222 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10223 return VERR_NOT_IMPLEMENTED;
10224}
10225
10226
10227VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10228{
10229 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10230 return VERR_NOT_IMPLEMENTED;
10231}
10232
10233
10234/**
10235 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10236 *
10237 * This API ASSUMES that the caller has already verified that the guest code is
10238 * allowed to access the I/O port. (The I/O port is in the DX register in the
10239 * guest state.)
10240 *
10241 * @returns Strict VBox status code.
10242 * @param pVCpu The cross context virtual CPU structure.
10243 * @param cbValue The size of the I/O port access (1, 2, or 4).
10244 * @param enmAddrMode The addressing mode.
10245 * @param fRepPrefix Indicates whether a repeat prefix is used
10246 * (doesn't matter which for this instruction).
10247 * @param cbInstr The instruction length in bytes.
10248 * @param iEffSeg The effective segment address.
10249 * @param fIoChecked Whether the access to the I/O port has been
10250 * checked or not. It's typically checked in the
10251 * HM scenario.
10252 */
10253VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10254 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10255{
10256 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10257 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10258
10259 /*
10260 * State init.
10261 */
10262 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10263
10264 /*
10265 * Switch orgy for getting to the right handler.
10266 */
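    /* Example of the dispatch below: a 'rep outsb' with 32-bit addressing, i.e.
       cbValue=1, enmAddrMode=IEMMODE_32BIT and fRepPrefix=true, lands in
       iemCImpl_rep_outs_op8_addr32. */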
10267 VBOXSTRICTRC rcStrict;
10268 if (fRepPrefix)
10269 {
10270 switch (enmAddrMode)
10271 {
10272 case IEMMODE_16BIT:
10273 switch (cbValue)
10274 {
10275 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10276 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10277 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10278 default:
10279 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10280 }
10281 break;
10282
10283 case IEMMODE_32BIT:
10284 switch (cbValue)
10285 {
10286 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10287 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10288 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10289 default:
10290 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10291 }
10292 break;
10293
10294 case IEMMODE_64BIT:
10295 switch (cbValue)
10296 {
10297 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10298 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10299 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10300 default:
10301 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10302 }
10303 break;
10304
10305 default:
10306 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10307 }
10308 }
10309 else
10310 {
10311 switch (enmAddrMode)
10312 {
10313 case IEMMODE_16BIT:
10314 switch (cbValue)
10315 {
10316 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10317 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10318 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10319 default:
10320 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10321 }
10322 break;
10323
10324 case IEMMODE_32BIT:
10325 switch (cbValue)
10326 {
10327 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10328 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10329 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10330 default:
10331 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10332 }
10333 break;
10334
10335 case IEMMODE_64BIT:
10336 switch (cbValue)
10337 {
10338 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10339 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10340 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10341 default:
10342 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10343 }
10344 break;
10345
10346 default:
10347 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10348 }
10349 }
10350
10351 if (pVCpu->iem.s.cActiveMappings)
10352 iemMemRollback(pVCpu);
10353
10354 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10355}
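
/*
 * Usage sketch (illustrative; the calling exit handler is an assumption, not
 * something defined in this file): an HM I/O exit handler that has already
 * validated guest access to the port in DX could forward a REP OUTSB with
 * 16-bit addressing and a DS-relative source roughly like this:
 *
 *     // cbValue=1 (byte), 16-bit addressing, REP prefix present, 2-byte
 *     // instruction (F3 6E), DS as effective segment, port access pre-checked.
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_16BIT,
 *                                                  true, 2, X86_SREG_DS, true);
 */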
10356
10357
10358/**
10359 * Interface for HM and EM to execute string I/O IN (read) instructions.
10360 *
10361 * This API ASSUMES that the caller has already verified that the guest code is
10362 * allowed to access the I/O port. (The I/O port is in the DX register in the
10363 * guest state.)
10364 *
10365 * @returns Strict VBox status code.
10366 * @param pVCpu The cross context virtual CPU structure.
10367 * @param cbValue The size of the I/O port access (1, 2, or 4).
10368 * @param enmAddrMode The addressing mode.
10369 * @param fRepPrefix Indicates whether a repeat prefix is used
10370 * (doesn't matter which for this instruction).
10371 * @param cbInstr The instruction length in bytes.
10372 * @param fIoChecked Whether the access to the I/O port has been
10373 * checked or not. It's typically checked in the
10374 * HM scenario.
10375 */
10376VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10377 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10378{
10379 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10380
10381 /*
10382 * State init.
10383 */
10384 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10385
10386 /*
10387 * Switch orgy for getting to the right handler.
10388 */
10389 VBOXSTRICTRC rcStrict;
10390 if (fRepPrefix)
10391 {
10392 switch (enmAddrMode)
10393 {
10394 case IEMMODE_16BIT:
10395 switch (cbValue)
10396 {
10397 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10398 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10399 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10400 default:
10401 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10402 }
10403 break;
10404
10405 case IEMMODE_32BIT:
10406 switch (cbValue)
10407 {
10408 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10409 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10410 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10411 default:
10412 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10413 }
10414 break;
10415
10416 case IEMMODE_64BIT:
10417 switch (cbValue)
10418 {
10419 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10420 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10421 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10422 default:
10423 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10424 }
10425 break;
10426
10427 default:
10428 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10429 }
10430 }
10431 else
10432 {
10433 switch (enmAddrMode)
10434 {
10435 case IEMMODE_16BIT:
10436 switch (cbValue)
10437 {
10438 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10439 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10440 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10441 default:
10442 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10443 }
10444 break;
10445
10446 case IEMMODE_32BIT:
10447 switch (cbValue)
10448 {
10449 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10450 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10451 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10452 default:
10453 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10454 }
10455 break;
10456
10457 case IEMMODE_64BIT:
10458 switch (cbValue)
10459 {
10460 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10461 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10462 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10463 default:
10464 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10465 }
10466 break;
10467
10468 default:
10469 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10470 }
10471 }
10472
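    /*
     * Unlike the OUT variant above, the rollback is skipped when VMCPU_FF_IEM
     * is set: that flag indicates a pending ring-3 write commit, which is
     * expected to be completed later by IEMR3ProcessForceFlag (see below)
     * rather than discarded here.
     */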
10473 if ( pVCpu->iem.s.cActiveMappings == 0
10474 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10475 { /* likely */ }
10476 else
10477 {
10478 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10479 iemMemRollback(pVCpu);
10480 }
10481 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10482}
10483
10484
10485/**
10486 * Interface for rawmode to execute an OUT instruction.
10487 *
10488 * @returns Strict VBox status code.
10489 * @param pVCpu The cross context virtual CPU structure.
10490 * @param cbInstr The instruction length in bytes.
10491 * @param u16Port The port to write to.
10492 * @param fImm Whether the port is specified using an immediate operand or
10493 * using the implicit DX register.
10494 * @param cbReg The register size.
10495 *
10496 * @remarks In ring-0 not all of the state needs to be synced in.
10497 */
10498VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10499{
10500 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10501 Assert(cbReg <= 4 && cbReg != 3);
10502
10503 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10504 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10505 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10506 Assert(!pVCpu->iem.s.cActiveMappings);
10507 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10508}
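
/*
 * Usage sketch (illustrative; uGuestDx stands in for a port value the caller
 * has read from the guest's DX register and is not defined in this file):
 * replaying an intercepted "out dx, al" could look roughly like this:
 *
 *     // 1-byte instruction (EE), port taken from DX (no immediate), byte access.
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1, uGuestDx, false, 1);
 *
 * IEMExecDecodedIn below is used the same way for the IN counterpart.
 */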
10509
10510
10511/**
10512 * Interface for rawmode to execute an IN instruction.
10513 *
10514 * @returns Strict VBox status code.
10515 * @param pVCpu The cross context virtual CPU structure.
10516 * @param cbInstr The instruction length in bytes.
10517 * @param u16Port The port to read.
10518 * @param fImm Whether the port is specified using an immediate operand or
10519 * using the implicit DX register.
10520 * @param cbReg The register size.
10521 */
10522VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10523{
10524 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10525 Assert(cbReg <= 4 && cbReg != 3);
10526
10527 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10528 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10529 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10530 Assert(!pVCpu->iem.s.cActiveMappings);
10531 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10532}
10533
10534
10535/**
10536 * Interface for HM and EM to write to a CRx register.
10537 *
10538 * @returns Strict VBox status code.
10539 * @param pVCpu The cross context virtual CPU structure.
10540 * @param cbInstr The instruction length in bytes.
10541 * @param iCrReg The control register number (destination).
10542 * @param iGReg The general purpose register number (source).
10543 *
10544 * @remarks In ring-0 not all of the state needs to be synced in.
10545 */
10546VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10547{
10548 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10549 Assert(iCrReg < 16);
10550 Assert(iGReg < 16);
10551
10552 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10553 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10554 Assert(!pVCpu->iem.s.cActiveMappings);
10555 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10556}
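
/*
 * Usage sketch (illustrative; the VMX/SVM exit handler implied here is an
 * assumption): the IEMExecDecoded* wrappers in this file all follow the same
 * init/call/assert/uninit pattern, so replaying e.g. an intercepted
 * "mov cr3, rax" (0F 22 D8, 3 bytes) would simply be:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3, 3, X86_GREG_xAX);
 */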
10557
10558
10559/**
10560 * Interface for HM and EM to read from a CRx register.
10561 *
10562 * @returns Strict VBox status code.
10563 * @param pVCpu The cross context virtual CPU structure.
10564 * @param cbInstr The instruction length in bytes.
10565 * @param iGReg The general purpose register number (destination).
10566 * @param iCrReg The control register number (source).
10567 *
10568 * @remarks In ring-0 not all of the state needs to be synced in.
10569 */
10570VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10571{
10572 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10573 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10574 | CPUMCTX_EXTRN_APIC_TPR);
10575 Assert(iCrReg < 16);
10576 Assert(iGReg < 16);
10577
10578 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10579 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10580 Assert(!pVCpu->iem.s.cActiveMappings);
10581 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10582}
10583
10584
10585/**
10586 * Interface for HM and EM to write to a DRx register.
10587 *
10588 * @returns Strict VBox status code.
10589 * @param pVCpu The cross context virtual CPU structure.
10590 * @param cbInstr The instruction length in bytes.
10591 * @param iDrReg The debug register number (destination).
10592 * @param iGReg The general purpose register number (source).
10593 *
10594 * @remarks In ring-0 not all of the state needs to be synced in.
10595 */
10596VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10597{
10598 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10599 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10600 Assert(iDrReg < 8);
10601 Assert(iGReg < 16);
10602
10603 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10604 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10605 Assert(!pVCpu->iem.s.cActiveMappings);
10606 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10607}
10608
10609
10610/**
10611 * Interface for HM and EM to read from a DRx register.
10612 *
10613 * @returns Strict VBox status code.
10614 * @param pVCpu The cross context virtual CPU structure.
10615 * @param cbInstr The instruction length in bytes.
10616 * @param iGReg The general purpose register number (destination).
10617 * @param iDrReg The debug register number (source).
10618 *
10619 * @remarks In ring-0 not all of the state needs to be synced in.
10620 */
10621VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10622{
10623 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10624 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10625 Assert(iDrReg < 8);
10626 Assert(iGReg < 16);
10627
10628 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10629 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10630 Assert(!pVCpu->iem.s.cActiveMappings);
10631 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10632}
10633
10634
10635/**
10636 * Interface for HM and EM to clear the CR0[TS] bit.
10637 *
10638 * @returns Strict VBox status code.
10639 * @param pVCpu The cross context virtual CPU structure.
10640 * @param cbInstr The instruction length in bytes.
10641 *
10642 * @remarks In ring-0 not all of the state needs to be synced in.
10643 */
10644VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10645{
10646 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10647
10648 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10649 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10650 Assert(!pVCpu->iem.s.cActiveMappings);
10651 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10652}
10653
10654
10655/**
10656 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10657 *
10658 * @returns Strict VBox status code.
10659 * @param pVCpu The cross context virtual CPU structure.
10660 * @param cbInstr The instruction length in bytes.
10661 * @param uValue The value to load into CR0.
10662 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10663 * memory operand. Otherwise pass NIL_RTGCPTR.
10664 *
10665 * @remarks In ring-0 not all of the state needs to be synced in.
10666 */
10667VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10668{
10669 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10670
10671 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10672 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10673 Assert(!pVCpu->iem.s.cActiveMappings);
10674 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10675}
10676
10677
10678/**
10679 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10680 *
10681 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10682 *
10683 * @returns Strict VBox status code.
10684 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10685 * @param cbInstr The instruction length in bytes.
10686 * @remarks In ring-0 not all of the state needs to be synced in.
10687 * @thread EMT(pVCpu)
10688 */
10689VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10690{
10691 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10692
10693 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10694 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10695 Assert(!pVCpu->iem.s.cActiveMappings);
10696 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10697}
10698
10699
10700/**
10701 * Interface for HM and EM to emulate the WBINVD instruction.
10702 *
10703 * @returns Strict VBox status code.
10704 * @param pVCpu The cross context virtual CPU structure.
10705 * @param cbInstr The instruction length in bytes.
10706 *
10707 * @remarks In ring-0 not all of the state needs to be synced in.
10708 */
10709VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10710{
10711 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10712
10713 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10714 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10715 Assert(!pVCpu->iem.s.cActiveMappings);
10716 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10717}
10718
10719
10720/**
10721 * Interface for HM and EM to emulate the INVD instruction.
10722 *
10723 * @returns Strict VBox status code.
10724 * @param pVCpu The cross context virtual CPU structure.
10725 * @param cbInstr The instruction length in bytes.
10726 *
10727 * @remarks In ring-0 not all of the state needs to be synced in.
10728 */
10729VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10730{
10731 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10732
10733 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10734 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10735 Assert(!pVCpu->iem.s.cActiveMappings);
10736 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10737}
10738
10739
10740/**
10741 * Interface for HM and EM to emulate the INVLPG instruction.
10742 *
10743 * @returns Strict VBox status code.
10744 * @retval VINF_PGM_SYNC_CR3
10745 *
10746 * @param pVCpu The cross context virtual CPU structure.
10747 * @param cbInstr The instruction length in bytes.
10748 * @param GCPtrPage The effective address of the page to invalidate.
10749 *
10750 * @remarks In ring-0 not all of the state needs to be synced in.
10751 */
10752VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10753{
10754 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10755
10756 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10757 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10758 Assert(!pVCpu->iem.s.cActiveMappings);
10759 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10760}
10761
10762
10763/**
10764 * Interface for HM and EM to emulate the INVPCID instruction.
10765 *
10766 * @returns Strict VBox status code.
10767 * @retval VINF_PGM_SYNC_CR3
10768 *
10769 * @param pVCpu The cross context virtual CPU structure.
10770 * @param cbInstr The instruction length in bytes.
10771 * @param iEffSeg The effective segment register.
10772 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10773 * @param uType The invalidation type.
10774 *
10775 * @remarks In ring-0 not all of the state needs to be synced in.
10776 */
10777VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10778 uint64_t uType)
10779{
10780 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10781
10782 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10783 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10784 Assert(!pVCpu->iem.s.cActiveMappings);
10785 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10786}
10787
10788
10789/**
10790 * Interface for HM and EM to emulate the CPUID instruction.
10791 *
10792 * @returns Strict VBox status code.
10793 *
10794 * @param pVCpu The cross context virtual CPU structure.
10795 * @param cbInstr The instruction length in bytes.
10796 *
10797 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
10798 */
10799VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10800{
10801 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10802 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10803
10804 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10805 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10806 Assert(!pVCpu->iem.s.cActiveMappings);
10807 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10808}
10809
10810
10811/**
10812 * Interface for HM and EM to emulate the RDPMC instruction.
10813 *
10814 * @returns Strict VBox status code.
10815 *
10816 * @param pVCpu The cross context virtual CPU structure.
10817 * @param cbInstr The instruction length in bytes.
10818 *
10819 * @remarks Not all of the state needs to be synced in.
10820 */
10821VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10822{
10823 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10824 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10825
10826 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10827 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10828 Assert(!pVCpu->iem.s.cActiveMappings);
10829 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10830}
10831
10832
10833/**
10834 * Interface for HM and EM to emulate the RDTSC instruction.
10835 *
10836 * @returns Strict VBox status code.
10837 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10838 *
10839 * @param pVCpu The cross context virtual CPU structure.
10840 * @param cbInstr The instruction length in bytes.
10841 *
10842 * @remarks Not all of the state needs to be synced in.
10843 */
10844VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10845{
10846 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10847 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10848
10849 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10850 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10851 Assert(!pVCpu->iem.s.cActiveMappings);
10852 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10853}
10854
10855
10856/**
10857 * Interface for HM and EM to emulate the RDTSCP instruction.
10858 *
10859 * @returns Strict VBox status code.
10860 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10861 *
10862 * @param pVCpu The cross context virtual CPU structure.
10863 * @param cbInstr The instruction length in bytes.
10864 *
10865 * @remarks Not all of the state needs to be synced in. It is recommended
10866 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10867 */
10868VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10869{
10870 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10871 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10872
10873 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10874 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10875 Assert(!pVCpu->iem.s.cActiveMappings);
10876 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10877}
10878
10879
10880/**
10881 * Interface for HM and EM to emulate the RDMSR instruction.
10882 *
10883 * @returns Strict VBox status code.
10884 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10885 *
10886 * @param pVCpu The cross context virtual CPU structure.
10887 * @param cbInstr The instruction length in bytes.
10888 *
10889 * @remarks Not all of the state needs to be synced in. Requires RCX and
10890 * (currently) all MSRs.
10891 */
10892VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10893{
10894 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10895 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10896
10897 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10898 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10899 Assert(!pVCpu->iem.s.cActiveMappings);
10900 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10901}
10902
10903
10904/**
10905 * Interface for HM and EM to emulate the WRMSR instruction.
10906 *
10907 * @returns Strict VBox status code.
10908 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10909 *
10910 * @param pVCpu The cross context virtual CPU structure.
10911 * @param cbInstr The instruction length in bytes.
10912 *
10913 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10914 * and (currently) all MSRs.
10915 */
10916VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10917{
10918 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10919 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10920 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10921
10922 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10923 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10924 Assert(!pVCpu->iem.s.cActiveMappings);
10925 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10926}
10927
10928
10929/**
10930 * Interface for HM and EM to emulate the MONITOR instruction.
10931 *
10932 * @returns Strict VBox status code.
10933 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10934 *
10935 * @param pVCpu The cross context virtual CPU structure.
10936 * @param cbInstr The instruction length in bytes.
10937 *
10938 * @remarks Not all of the state needs to be synced in.
10939 * @remarks ASSUMES the default DS segment and that no segment override
10940 *          prefixes are used.
10941 */
10942VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10943{
10944 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10945 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10946
10947 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10948 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10949 Assert(!pVCpu->iem.s.cActiveMappings);
10950 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10951}
10952
10953
10954/**
10955 * Interface for HM and EM to emulate the MWAIT instruction.
10956 *
10957 * @returns Strict VBox status code.
10958 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10959 *
10960 * @param pVCpu The cross context virtual CPU structure.
10961 * @param cbInstr The instruction length in bytes.
10962 *
10963 * @remarks Not all of the state needs to be synced in.
10964 */
10965VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10966{
10967 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10968 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10969
10970 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10971 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10972 Assert(!pVCpu->iem.s.cActiveMappings);
10973 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10974}
10975
10976
10977/**
10978 * Interface for HM and EM to emulate the HLT instruction.
10979 *
10980 * @returns Strict VBox status code.
10981 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10982 *
10983 * @param pVCpu The cross context virtual CPU structure.
10984 * @param cbInstr The instruction length in bytes.
10985 *
10986 * @remarks Not all of the state needs to be synced in.
10987 */
10988VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10989{
10990 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10991
10992 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10993 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10994 Assert(!pVCpu->iem.s.cActiveMappings);
10995 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10996}
10997
10998
10999/**
11000 * Checks if IEM is in the process of delivering an event (interrupt or
11001 * exception).
11002 *
11003 * @returns true if we're in the process of raising an interrupt or exception,
11004 * false otherwise.
11005 * @param pVCpu The cross context virtual CPU structure.
11006 * @param puVector Where to store the vector associated with the
11007 * currently delivered event, optional.
11008 * @param pfFlags Where to store the event delivery flags (see
11009 * IEM_XCPT_FLAGS_XXX), optional.
11010 * @param puErr Where to store the error code associated with the
11011 * event, optional.
11012 * @param puCr2 Where to store the CR2 associated with the event,
11013 * optional.
11014 * @remarks The caller should check the flags to determine if the error code and
11015 * CR2 are valid for the event.
11016 */
11017VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11018{
11019 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11020 if (fRaisingXcpt)
11021 {
11022 if (puVector)
11023 *puVector = pVCpu->iem.s.uCurXcpt;
11024 if (pfFlags)
11025 *pfFlags = pVCpu->iem.s.fCurXcpt;
11026 if (puErr)
11027 *puErr = pVCpu->iem.s.uCurXcptErr;
11028 if (puCr2)
11029 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11030 }
11031 return fRaisingXcpt;
11032}
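
/*
 * Usage sketch (illustrative): a caller that needs to know whether an event is
 * currently being delivered, e.g. to build exit interruption information, can
 * query it like this; any output pointer it does not care about may be NULL:
 *
 *     uint8_t  uVector;
 *     uint32_t fFlags, uErrCode;
 *     uint64_t uCr2;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, &uCr2))
 *     {
 *         // Consult fFlags (IEM_XCPT_FLAGS_XXX) before trusting uErrCode/uCr2.
 *     }
 */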
11033
11034#ifdef IN_RING3
11035
11036/**
11037 * Handles the unlikely and probably fatal merge cases.
11038 *
11039 * @returns Merged status code.
11040 * @param rcStrict Current EM status code.
11041 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11042 * with @a rcStrict.
11043 * @param iMemMap The memory mapping index. For error reporting only.
11044 * @param pVCpu The cross context virtual CPU structure of the calling
11045 * thread, for error reporting only.
11046 */
11047DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11048 unsigned iMemMap, PVMCPUCC pVCpu)
11049{
11050 if (RT_FAILURE_NP(rcStrict))
11051 return rcStrict;
11052
11053 if (RT_FAILURE_NP(rcStrictCommit))
11054 return rcStrictCommit;
11055
11056 if (rcStrict == rcStrictCommit)
11057 return rcStrictCommit;
11058
11059 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11060 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11061 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11062 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11063 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11064 return VERR_IOM_FF_STATUS_IPE;
11065}
11066
11067
11068/**
11069 * Helper for IOMR3ProcessForceFlag.
11070 *
11071 * @returns Merged status code.
11072 * @param rcStrict Current EM status code.
11073 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11074 * with @a rcStrict.
11075 * @param iMemMap The memory mapping index. For error reporting only.
11076 * @param pVCpu The cross context virtual CPU structure of the calling
11077 * thread, for error reporting only.
11078 */
11079DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11080{
11081 /* Simple. */
11082 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11083 return rcStrictCommit;
11084
11085 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11086 return rcStrict;
11087
11088 /* EM scheduling status codes. */
11089 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11090 && rcStrict <= VINF_EM_LAST))
11091 {
11092 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11093 && rcStrictCommit <= VINF_EM_LAST))
11094 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11095 }
11096
11097 /* Unlikely */
11098 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11099}
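
/*
 * Summary of the merge rules above: if the current EM status is VINF_SUCCESS
 * or VINF_EM_RAW_TO_R3 the commit status wins; if the commit status is
 * VINF_SUCCESS the current status wins; when both are EM scheduling codes the
 * numerically lower one, i.e. by VBox convention the higher priority request,
 * is kept. Everything else goes to the slow path, which passes a failure
 * through as-is and flags genuinely conflicting informational codes as
 * VERR_IOM_FF_STATUS_IPE.
 */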
11100
11101
11102/**
11103 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11104 *
11105 * @returns Merge between @a rcStrict and what the commit operation returned.
11106 * @param pVM The cross context VM structure.
11107 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11108 * @param rcStrict The status code returned by ring-0 or raw-mode.
11109 */
11110VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11111{
11112 /*
11113 * Reset the pending commit.
11114 */
11115 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11116 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11117 ("%#x %#x %#x\n",
11118 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11119 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11120
11121 /*
11122 * Commit the pending bounce buffers (usually just one).
11123 */
11124 unsigned cBufs = 0;
11125 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11126 while (iMemMap-- > 0)
11127 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11128 {
11129 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11130 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11131 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11132
11133 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11134 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11135 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11136
11137 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11138 {
11139 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11140 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11141 pbBuf,
11142 cbFirst,
11143 PGMACCESSORIGIN_IEM);
11144 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11145 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11146 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11147 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11148 }
11149
11150 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11151 {
11152 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11153 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11154 pbBuf + cbFirst,
11155 cbSecond,
11156 PGMACCESSORIGIN_IEM);
11157 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11158 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11159 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11160 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11161 }
11162 cBufs++;
11163 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11164 }
11165
11166 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11167 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11168 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11169 pVCpu->iem.s.cActiveMappings = 0;
11170 return rcStrict;
11171}
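
/*
 * Usage sketch (illustrative; the surrounding ring-3 force-flag dispatch code
 * is an assumption, not shown in this file): after returning to ring-3 with
 * VMCPU_FF_IEM set, the pending bounce buffer writes get committed and the
 * status codes merged roughly like this:
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */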
11172
11173#endif /* IN_RING3 */
11174