VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 105590

Last change on this file since 105590 was 105579, checked in by vboxsync, 8 months ago

VMM/IEM: Fixed another bug in the large page TLB invalidation code which caused it to only evict half of the pages. More TLB tracing event work. bugref:10727

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 461.2 KB
1/* $Id: IEMAll.cpp 105579 2024-08-02 21:10:41Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
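/* An illustrative sketch, not part of this file: how the level assignments above map onto the
 * VBox/log.h macros for the active LOG_GROUP.  These statements are hypothetical examples only
 * (hence the #if 0); the variables referenced are assumed to be in scope at a real call site. */
#if 0
    LogFlow(("IEMExecOne: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));                  /* Flow    : enter/exit state. */
    Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); /* Level 1 : exceptions and other major events. */
    Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));                                                             /* Level 10: TLB activity. */
#endif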
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
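/* For illustration (based on the usual x86/AMD64 exception classification rules rather than text
 * from this file): #DE, #TS, #NP, #SS and #GP are normally contributory, #PF gets its own class
 * so the page-fault/contributory combinations that escalate to #DF can be told apart, and most
 * other vectors (e.g. #UD, #NM) are benign.  The exception raising code later in this file uses
 * this enum to decide when a #DF must be raised instead of delivering the second exception. */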
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * This will also invalidate TLB entries for any pages with active data
202 * breakpoints on them.
203 *
204 * @returns IEM_F_BRK_PENDING_XXX or zero.
205 * @param pVCpu The cross context virtual CPU structure of the
206 * calling thread.
207 *
208 * @note Don't call directly, use iemCalcExecDbgFlags instead.
209 */
210uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
211{
212 uint32_t fExec = 0;
213
214 /*
215 * Helper for invalidating the data TLB entries for breakpoint addresses.
216 *
217 * This is to make sure any access to the page will always trigger a TLB
218 * load for as long as the breakpoint is enabled.
219 */
220#ifdef IEM_WITH_DATA_TLB
221# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
222 RTGCPTR uTagNoRev = (a_uValue); \
223 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
224 /** @todo do large page accounting */ \
225 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
226 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
227 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
228 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
229 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
230 } while (0)
231#else
232# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
233#endif
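/* The macro above leans on the IEM TLB layout (spelled out here as an assumption that matches
 * the lookups used elsewhere in this file): each tag maps to a pair of slots, the even one
 * holding the non-global entry (valid when tagged with uTlbRevision) and the odd one the global
 * entry (valid when tagged with uTlbRevisionGlobal).  Zeroing uTag is sufficient to invalidate a
 * slot because the revisions are kept non-zero, so a zero tag can never match. */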
234
235 /*
236 * Process guest breakpoints.
237 */
238#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
239 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
240 { \
241 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
242 { \
243 case X86_DR7_RW_EO: \
244 fExec |= IEM_F_PENDING_BRK_INSTR; \
245 break; \
246 case X86_DR7_RW_WO: \
247 case X86_DR7_RW_RW: \
248 fExec |= IEM_F_PENDING_BRK_DATA; \
249 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
250 break; \
251 case X86_DR7_RW_IO: \
252 fExec |= IEM_F_PENDING_BRK_X86_IO; \
253 break; \
254 } \
255 } \
256 } while (0)
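/* Worked example (restating the macro above, not new behaviour): if the guest points DR0 at a
 * data address and sets DR7.L0=1 with R/W0=01 (write-only), the X86_DR7_RW_WO case is taken,
 * IEM_F_PENDING_BRK_DATA gets set and the TLB entry pair covering the DR0 page is evicted, so
 * the next access to that page must go through a fresh TLB load where the breakpoint can be
 * noticed. */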
257
258 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
259 if (fGstDr7 & X86_DR7_ENABLED_MASK)
260 {
261/** @todo extract more details here to simplify matching later. */
262#ifdef IEM_WITH_DATA_TLB
263 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
264#endif
265 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
266 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
267 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
268 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
269 }
270
271 /*
272 * Process hypervisor breakpoints.
273 */
274 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
275 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
276 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
277 {
278/** @todo extract more details here to simplify matching later. */
279 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
280 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
281 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
282 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
283 }
284
285 return fExec;
286}
287
288
289/**
290 * Initializes the decoder state.
291 *
292 * iemReInitDecoder is mostly a copy of this function.
293 *
294 * @param pVCpu The cross context virtual CPU structure of the
295 * calling thread.
296 * @param fExecOpts Optional execution flags:
297 * - IEM_F_BYPASS_HANDLERS
298 * - IEM_F_X86_DISREGARD_LOCK
299 */
300DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
301{
302 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
303 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
312
313 /* Execution state: */
314 uint32_t fExec;
315 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
316
317 /* Decoder state: */
318 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
319 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
320 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
321 {
322 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
323 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
324 }
325 else
326 {
327 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
328 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
329 }
330 pVCpu->iem.s.fPrefixes = 0;
331 pVCpu->iem.s.uRexReg = 0;
332 pVCpu->iem.s.uRexB = 0;
333 pVCpu->iem.s.uRexIndex = 0;
334 pVCpu->iem.s.idxPrefix = 0;
335 pVCpu->iem.s.uVex3rdReg = 0;
336 pVCpu->iem.s.uVexLength = 0;
337 pVCpu->iem.s.fEvexStuff = 0;
338 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
339#ifdef IEM_WITH_CODE_TLB
340 pVCpu->iem.s.pbInstrBuf = NULL;
341 pVCpu->iem.s.offInstrNextByte = 0;
342 pVCpu->iem.s.offCurInstrStart = 0;
343# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
344 pVCpu->iem.s.offOpcode = 0;
345# endif
346# ifdef VBOX_STRICT
347 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
348 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
349 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
350 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
351# endif
352#else
353 pVCpu->iem.s.offOpcode = 0;
354 pVCpu->iem.s.cbOpcode = 0;
355#endif
356 pVCpu->iem.s.offModRm = 0;
357 pVCpu->iem.s.cActiveMappings = 0;
358 pVCpu->iem.s.iNextMapping = 0;
359 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
360
361#ifdef DBGFTRACE_ENABLED
362 switch (IEM_GET_CPU_MODE(pVCpu))
363 {
364 case IEMMODE_64BIT:
365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
366 break;
367 case IEMMODE_32BIT:
368 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
369 break;
370 case IEMMODE_16BIT:
371 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
372 break;
373 }
374#endif
375}
376
377
378/**
379 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
380 *
381 * This is mostly a copy of iemInitDecoder.
382 *
383 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
384 */
385DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
386{
387 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
396
397 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
398 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
399 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
400
401 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
402 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
403 pVCpu->iem.s.enmEffAddrMode = enmMode;
404 if (enmMode != IEMMODE_64BIT)
405 {
406 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
407 pVCpu->iem.s.enmEffOpSize = enmMode;
408 }
409 else
410 {
411 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
412 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
413 }
414 pVCpu->iem.s.fPrefixes = 0;
415 pVCpu->iem.s.uRexReg = 0;
416 pVCpu->iem.s.uRexB = 0;
417 pVCpu->iem.s.uRexIndex = 0;
418 pVCpu->iem.s.idxPrefix = 0;
419 pVCpu->iem.s.uVex3rdReg = 0;
420 pVCpu->iem.s.uVexLength = 0;
421 pVCpu->iem.s.fEvexStuff = 0;
422 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
423#ifdef IEM_WITH_CODE_TLB
424 if (pVCpu->iem.s.pbInstrBuf)
425 {
426 uint64_t off = (enmMode == IEMMODE_64BIT
427 ? pVCpu->cpum.GstCtx.rip
428 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
429 - pVCpu->iem.s.uInstrBufPc;
430 if (off < pVCpu->iem.s.cbInstrBufTotal)
431 {
432 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
433 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
434 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
435 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
436 else
437 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
438 }
439 else
440 {
441 pVCpu->iem.s.pbInstrBuf = NULL;
442 pVCpu->iem.s.offInstrNextByte = 0;
443 pVCpu->iem.s.offCurInstrStart = 0;
444 pVCpu->iem.s.cbInstrBuf = 0;
445 pVCpu->iem.s.cbInstrBufTotal = 0;
446 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
447 }
448 }
449 else
450 {
451 pVCpu->iem.s.offInstrNextByte = 0;
452 pVCpu->iem.s.offCurInstrStart = 0;
453 pVCpu->iem.s.cbInstrBuf = 0;
454 pVCpu->iem.s.cbInstrBufTotal = 0;
455# ifdef VBOX_STRICT
456 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
457# endif
458 }
459# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
460 pVCpu->iem.s.offOpcode = 0;
461# endif
462#else /* !IEM_WITH_CODE_TLB */
463 pVCpu->iem.s.cbOpcode = 0;
464 pVCpu->iem.s.offOpcode = 0;
465#endif /* !IEM_WITH_CODE_TLB */
466 pVCpu->iem.s.offModRm = 0;
467 Assert(pVCpu->iem.s.cActiveMappings == 0);
468 pVCpu->iem.s.iNextMapping = 0;
469 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
470 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
471
472#ifdef DBGFTRACE_ENABLED
473 switch (enmMode)
474 {
475 case IEMMODE_64BIT:
476 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
477 break;
478 case IEMMODE_32BIT:
479 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
480 break;
481 case IEMMODE_16BIT:
482 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
483 break;
484 }
485#endif
486}
487
488
489
490/**
491 * Prefetches opcodes when starting execution for the first time.
492 *
493 * @returns Strict VBox status code.
494 * @param pVCpu The cross context virtual CPU structure of the
495 * calling thread.
496 * @param fExecOpts Optional execution flags:
497 * - IEM_F_BYPASS_HANDLERS
498 * - IEM_F_X86_DISREGARD_LOCK
499 */
500static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
501{
502 iemInitDecoder(pVCpu, fExecOpts);
503
504#ifndef IEM_WITH_CODE_TLB
505 /*
506 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
507 *
508 * First translate CS:rIP to a physical address.
509 *
510 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
511 * all relevant bytes from the first page, as it ASSUMES it's only ever
512 * called for dealing with CS.LIM, page crossing and instructions that
513 * are too long.
514 */
515 uint32_t cbToTryRead;
516 RTGCPTR GCPtrPC;
517 if (IEM_IS_64BIT_CODE(pVCpu))
518 {
519 cbToTryRead = GUEST_PAGE_SIZE;
520 GCPtrPC = pVCpu->cpum.GstCtx.rip;
521 if (IEM_IS_CANONICAL(GCPtrPC))
522 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
523 else
524 return iemRaiseGeneralProtectionFault0(pVCpu);
525 }
526 else
527 {
528 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
529 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
530 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
531 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
532 else
533 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
534 if (cbToTryRead) { /* likely */ }
535 else /* overflowed */
536 {
537 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
538 cbToTryRead = UINT32_MAX;
539 }
540 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
541 Assert(GCPtrPC <= UINT32_MAX);
542 }
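    /* Worked example with illustrative numbers (not from this file): in 32-bit code with
       cs.u64Base=0x10000, eip=0xffd and a limit of at least 0xfff, GCPtrPC becomes 0x10ffd and
       the page clamp further down trims cbToTryRead to the 3 bytes left on that guest page, so
       this initial prefetch never crosses a page boundary. */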
543
544 PGMPTWALKFAST WalkFast;
545 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
546 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
547 &WalkFast);
548 if (RT_SUCCESS(rc))
549 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
550 else
551 {
552 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
553# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
554/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
555 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
556 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
557 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
558# endif
559 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
560 }
561#if 0
562 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
563 else
564 {
565 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
566# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
567/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
568# error completely wrong
569 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
570 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
571# endif
572 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
573 }
574 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
575 else
576 {
577 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
578# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
579/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
580# error completely wrong.
581 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
582 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
583# endif
584 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
585 }
586#else
587 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
588 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
589#endif
590 RTGCPHYS const GCPhys = WalkFast.GCPhys;
591
592 /*
593 * Read the bytes at this address.
594 */
595 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
596 if (cbToTryRead > cbLeftOnPage)
597 cbToTryRead = cbLeftOnPage;
598 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
599 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
600
601 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
602 {
603 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
604 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
605 { /* likely */ }
606 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
607 {
608 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
609 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
610 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
611 }
612 else
613 {
614 Log((RT_SUCCESS(rcStrict)
615 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
616 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
617 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
618 return rcStrict;
619 }
620 }
621 else
622 {
623 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
624 if (RT_SUCCESS(rc))
625 { /* likely */ }
626 else
627 {
628 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
629 GCPtrPC, GCPhys, cbToTryRead, rc));
630 return rc;
631 }
632 }
633 pVCpu->iem.s.cbOpcode = cbToTryRead;
634#endif /* !IEM_WITH_CODE_TLB */
635 return VINF_SUCCESS;
636}
637
638
639#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
640/**
641 * Helper for doing large page accounting at TLB load time.
642 */
643template<bool const a_fGlobal>
644DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)
645{
646 if (a_fGlobal)
647 pTlb->cTlbGlobalLargePageCurLoads++;
648 else
649 pTlb->cTlbNonGlobalLargePageCurLoads++;
650
651 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
652 uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;
653 IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal
654 ? &pTlb->GlobalLargePageRange
655 : &pTlb->NonGlobalLargePageRange;
656 uTagNoRev &= ~(RTGCPTR)fMask;
657 if (uTagNoRev < pRange->uFirstTag)
658 pRange->uFirstTag = uTagNoRev;
659
660 uTagNoRev |= fMask;
661 if (uTagNoRev > pRange->uLastTag)
662 pRange->uLastTag = uTagNoRev;
663}
664#endif
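/* Worked example for the accounting above (illustrative values): for a 2 MiB page,
 * fMask = (_2M - 1) >> GUEST_PAGE_SHIFT = 0x1ff, so a load with tag 0x12345 widens the range to
 * uFirstTag <= 0x12200 and uLastTag >= 0x123ff, i.e. the full 512-tag span the large page
 * covers.  The ranges only ever grow here; they are reset again by the invalidation code below. */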
665
666
667#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
668/**
669 * Worker for iemTlbInvalidateAll.
670 */
671template<bool a_fGlobal>
672DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
673{
674 if (!a_fGlobal)
675 pTlb->cTlsFlushes++;
676 else
677 pTlb->cTlsGlobalFlushes++;
678
679 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
680 if (RT_LIKELY(pTlb->uTlbRevision != 0))
681 { /* very likely */ }
682 else
683 {
684 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
685 pTlb->cTlbRevisionRollovers++;
686 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
687 while (i-- > 0)
688 pTlb->aEntries[i * 2].uTag = 0;
689 }
690
691 pTlb->cTlbNonGlobalLargePageCurLoads = 0;
692 pTlb->NonGlobalLargePageRange.uLastTag = 0;
693 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
694
695 if (a_fGlobal)
696 {
697 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
698 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
699 { /* very likely */ }
700 else
701 {
702 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
703 pTlb->cTlbRevisionRollovers++;
704 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
705 while (i-- > 0)
706 pTlb->aEntries[i * 2 + 1].uTag = 0;
707 }
708
709 pTlb->cTlbGlobalLargePageCurLoads = 0;
710 pTlb->GlobalLargePageRange.uLastTag = 0;
711 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
712 }
713}
714#endif
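/* A note on the scheme above, spelled out from the surrounding code: entries are stored as
 * uTag = tagNoRev | revision, so bumping the revision by IEMTLB_REVISION_INCR makes every
 * existing entry compare as stale without touching the array.  Only on the rare revision
 * rollover do the loops above actually zero the even (non-global) or odd (global) slots. */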
715
716
717/**
718 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
719 */
720template<bool a_fGlobal>
721DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
722{
723#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
724 Log10(("IEMTlbInvalidateAll\n"));
725
726# ifdef IEM_WITH_CODE_TLB
727 pVCpu->iem.s.cbInstrBufTotal = 0;
728 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
729 if (a_fGlobal)
730 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
731 else
732 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
733# endif
734
735# ifdef IEM_WITH_DATA_TLB
736 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
737 if (a_fGlobal)
738 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
739 else
740 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
741# endif
742#else
743 RT_NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the non-global IEM TLB entries.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVCpu The cross context virtual CPU structure of the calling
754 * thread.
755 */
756VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
757{
758 iemTlbInvalidateAll<false>(pVCpu);
759}
760
761
762/**
763 * Invalidates all the IEM TLB entries.
764 *
765 * This is called internally as well as by PGM when moving GC mappings.
766 *
767 * @param pVCpu The cross context virtual CPU structure of the calling
768 * thread.
769 */
770VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
771{
772 iemTlbInvalidateAll<true>(pVCpu);
773}
774
775
776#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
777
778template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>
779DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag)
780{
781 IEMTLBTRACE_LARGE_SCAN(pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb);
782
783 /* Combine TAG values with the TLB revisions. */
784 RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;
785 if (a_fNonGlobal)
786 GCPtrTag |= pTlb->uTlbRevision;
787
788 /* Set up the scan. */
789 bool const fPartialScan = IEMTLB_ENTRY_COUNT >= (a_f2MbLargePage ? 512 : 1024);
790 uintptr_t idxEven = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;
791 uintptr_t const idxEvenEnd = fPartialScan ? idxEven + ((a_f2MbLargePage ? 512 : 1024) * 2) : IEMTLB_ENTRY_COUNT * 2;
792 RTGCPTR const GCPtrTagMask = fPartialScan
793 ? ~(RTGCPTR)0
794 : ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK
795 & ~(RTGCPTR)( (RT_BIT_64(RT_MAX( (a_f2MbLargePage ? 9 : 10)
796 - IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO, 0)) - 1U)
797 << IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO);
798
799 /*
800 * Do the scanning.
801 */
802 for (; idxEven < idxEvenEnd; idxEven += 2)
803 {
804 if (a_fNonGlobal)
805 {
806 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag)
807 {
808 if (pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
809 {
810 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven].GCPhys, idxEven, a_fDataTlb);
811 pTlb->aEntries[idxEven].uTag = 0;
812 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
813 pVCpu->iem.s.cbInstrBufTotal = 0;
814 }
815 }
816 GCPtrTag++;
817 }
818
819 if (a_fGlobal)
820 {
821 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == GCPtrTagGlob)
822 {
823 if (pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
824 {
825 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven + 1].GCPhys, idxEven + 1, a_fDataTlb);
826 pTlb->aEntries[idxEven + 1].uTag = 0;
827 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
828 pVCpu->iem.s.cbInstrBufTotal = 0;
829 }
830 }
831 GCPtrTagGlob++;
832 }
833 }
834
835}
836
837template<bool const a_fDataTlb, bool const a_f2MbLargePage>
838DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag)
839{
840 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
841
842 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);
843 if ( GCPtrTag >= pTlb->GlobalLargePageRange.uFirstTag
844 && GCPtrTag <= pTlb->GlobalLargePageRange.uLastTag)
845 {
846 if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
847 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
848 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
849 else
850 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
851 }
852 else if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
853 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
854 {
855 /* Large pages aren't as likely in the non-global TLB half. */
856 IEMTLBTRACE_LARGE_SCAN(pVCpu, false, false, a_fDataTlb);
857 }
858 else
859 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
860}
861
862template<bool const a_fDataTlb>
863DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven)
864{
865 /*
866 * Flush the entry pair.
867 */
868 if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
869 {
870 IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven].GCPhys, idxEven, a_fDataTlb);
871 pTlb->aEntries[idxEven].uTag = 0;
872 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
873 pVCpu->iem.s.cbInstrBufTotal = 0;
874 }
875 if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
876 {
877 IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven + 1].GCPhys, idxEven + 1, a_fDataTlb);
878 pTlb->aEntries[idxEven + 1].uTag = 0;
879 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
880 pVCpu->iem.s.cbInstrBufTotal = 0;
881 }
882
883 /*
884 * If there are (or has been) large pages in the TLB, we must check if the
885 * address being flushed may involve one of those, as then we'd have to
886 * scan for entries relating to the same page and flush those as well.
887 */
888# if 0 /** @todo do accurate counts or currently loaded large stuff and we can use those */
889 if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)
890# else
891 if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)
892# endif
893 {
894 RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);
895 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
896 iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
897 else
898 iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
899 }
900}
901
902#endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */
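/* Rough sketch of the flow implemented above (a restatement, not additional behaviour): an
 * INVLPG-style request first zeroes the even/odd entry pair its tag hashes to; only when large
 * pages have been seen (non-zero range bookkeeping) does it also scan the slots a 2 MiB or
 * 4 MiB page would occupy, choosing the variant from CR4.PAE. */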
903
904/**
905 * Invalidates a page in the TLBs.
906 *
907 * @param pVCpu The cross context virtual CPU structure of the calling
908 * thread.
909 * @param GCPtr The address of the page to invalidate
910 * @thread EMT(pVCpu)
911 */
912VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
913{
914 IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
915#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
916 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
917 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
918 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
919 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
920
921# ifdef IEM_WITH_CODE_TLB
922 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
923# endif
924# ifdef IEM_WITH_DATA_TLB
925 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
926# endif
927#else
928 NOREF(pVCpu); NOREF(GCPtr);
929#endif
930}
931
932
933#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
934/**
935 * Invalidates both TLBs in slow fashion following a rollover.
936 *
937 * Worker for IEMTlbInvalidateAllPhysical,
938 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
939 * iemMemMapJmp and others.
940 *
941 * @thread EMT(pVCpu)
942 */
943static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
944{
945 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
946 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
947 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
948
949 unsigned i;
950# ifdef IEM_WITH_CODE_TLB
951 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
952 while (i-- > 0)
953 {
954 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
955 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
956 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
957 }
958 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
959 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
960# endif
961# ifdef IEM_WITH_DATA_TLB
962 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
963 while (i-- > 0)
964 {
965 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
966 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
967 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
968 }
969 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
970 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
971# endif
972
973}
974#endif
975
976
977/**
978 * Invalidates the host physical aspects of the IEM TLBs.
979 *
980 * This is called internally as well as by PGM when moving GC mappings.
981 *
982 * @param pVCpu The cross context virtual CPU structure of the calling
983 * thread.
984 * @note Currently not used.
985 */
986VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
987{
988#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
989 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
990 Log10(("IEMTlbInvalidateAllPhysical\n"));
991
992# ifdef IEM_WITH_CODE_TLB
993 pVCpu->iem.s.cbInstrBufTotal = 0;
994# endif
995 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
996 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
997 {
998 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
999 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1000 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1001 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1002 }
1003 else
1004 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1005#else
1006 NOREF(pVCpu);
1007#endif
1008}
1009
1010
1011/**
1012 * Invalidates the host physical aspects of the IEM TLBs.
1013 *
1014 * This is called internally as well as by PGM when moving GC mappings.
1015 *
1016 * @param pVM The cross context VM structure.
1017 * @param idCpuCaller The ID of the calling EMT if available to the caller,
1018 * otherwise NIL_VMCPUID.
1019 * @param enmReason The reason we're called.
1020 *
1021 * @remarks Caller holds the PGM lock.
1022 */
1023VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
1024{
1025#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1026 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
1027 if (pVCpuCaller)
1028 VMCPU_ASSERT_EMT(pVCpuCaller);
1029 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
1030
1031 VMCC_FOR_EACH_VMCPU(pVM)
1032 {
1033# ifdef IEM_WITH_CODE_TLB
1034 if (pVCpuCaller == pVCpu)
1035 pVCpu->iem.s.cbInstrBufTotal = 0;
1036# endif
1037
1038 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
1039 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
1040 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
1041 { /* likely */}
1042 else if (pVCpuCaller != pVCpu)
1043 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
1044 else
1045 {
1046 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1047 continue;
1048 }
1049 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1050 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1051
1052 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1053 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1054 }
1055 VMCC_FOR_EACH_VMCPU_END(pVM);
1056
1057#else
1058 RT_NOREF(pVM, idCpuCaller, enmReason);
1059#endif
1060}
1061
1062
1063/**
1064 * Flushes the prefetch buffer, light version.
1065 */
1066void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
1067{
1068#ifndef IEM_WITH_CODE_TLB
1069 pVCpu->iem.s.cbOpcode = cbInstr;
1070#else
1071 RT_NOREF(pVCpu, cbInstr);
1072#endif
1073}
1074
1075
1076/**
1077 * Flushes the prefetch buffer, heavy version.
1078 */
1079void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
1080{
1081#ifndef IEM_WITH_CODE_TLB
1082 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
1083#elif 1
1084 pVCpu->iem.s.cbInstrBufTotal = 0;
1085 RT_NOREF(cbInstr);
1086#else
1087 RT_NOREF(pVCpu, cbInstr);
1088#endif
1089}
1090
1091
1092
1093#ifdef IEM_WITH_CODE_TLB
1094
1095/**
1096 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1097 * failure and jumping.
1098 *
1099 * We end up here for a number of reasons:
1100 * - pbInstrBuf isn't yet initialized.
1101 * - Advancing beyond the buffer boundary (e.g. cross page).
1102 * - Advancing beyond the CS segment limit.
1103 * - Fetching from non-mappable page (e.g. MMIO).
1104 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
1105 *
1106 * @param pVCpu The cross context virtual CPU structure of the
1107 * calling thread.
1108 * @param pvDst Where to return the bytes.
1109 * @param cbDst Number of bytes to read. A value of zero is
1110 * allowed for initializing pbInstrBuf (the
1111 * recompiler does this). In this case it is best
1112 * to set pbInstrBuf to NULL prior to the call.
1113 */
1114void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
1115{
1116# ifdef IN_RING3
1117 for (;;)
1118 {
1119 Assert(cbDst <= 8);
1120 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1121
1122 /*
1123 * We might have a partial buffer match, deal with that first to make the
1124 * rest simpler. This is the first part of the cross page/buffer case.
1125 */
1126 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
1127 if (pbInstrBuf != NULL)
1128 {
1129 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
1130 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
1131 if (offBuf < cbInstrBuf)
1132 {
1133 Assert(offBuf + cbDst > cbInstrBuf);
1134 uint32_t const cbCopy = cbInstrBuf - offBuf;
1135 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
1136
1137 cbDst -= cbCopy;
1138 pvDst = (uint8_t *)pvDst + cbCopy;
1139 offBuf += cbCopy;
1140 }
1141 }
1142
1143 /*
1144 * Check segment limit, figuring how much we're allowed to access at this point.
1145 *
1146 * We will fault immediately if RIP is past the segment limit / in non-canonical
1147 * territory. If we do continue, there are one or more bytes to read before we
1148 * end up in trouble and we need to do that first before faulting.
1149 */
1150 RTGCPTR GCPtrFirst;
1151 uint32_t cbMaxRead;
1152 if (IEM_IS_64BIT_CODE(pVCpu))
1153 {
1154 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1155 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1156 { /* likely */ }
1157 else
1158 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1159 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1160 }
1161 else
1162 {
1163 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1164 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1165 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1166 { /* likely */ }
1167 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
1168 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1169 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1170 if (cbMaxRead != 0)
1171 { /* likely */ }
1172 else
1173 {
1174 /* Overflowed because address is 0 and limit is max. */
1175 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1176 cbMaxRead = X86_PAGE_SIZE;
1177 }
1178 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1179 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1180 if (cbMaxRead2 < cbMaxRead)
1181 cbMaxRead = cbMaxRead2;
1182 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1183 }
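        /* Worked example (illustrative): with a flat code segment and GCPtrFirst=0x00401ffe,
           the checks above leave plenty of room but the page clamp reduces cbMaxRead to the
           2 bytes left on the page, so the copy below at most finishes the current page and the
           outer loop fetches the rest from the next one. */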
1184
1185 /*
1186 * Get the TLB entry for this piece of code.
1187 */
1188 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
1189 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
1190 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
1191 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
1192 {
1193 /* likely when executing lots of code, otherwise unlikely */
1194# ifdef IEM_WITH_TLB_STATISTICS
1195 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
1196# endif
1197 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1198
1199 /* Check TLB page table level access flags. */
1200 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1201 {
1202 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
1203 {
1204 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1205 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1206 }
1207 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1208 {
1209 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1210 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1211 }
1212 }
1213
1214 /* Look up the physical page info if necessary. */
1215 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1216 { /* not necessary */ }
1217 else
1218 {
1219 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1220 { /* likely */ }
1221 else
1222 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1223 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1224 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1225 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1226 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1227 }
1228 }
1229 else
1230 {
1231 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
1232
1233 /* This page table walking will set A bits as required by the access while performing the walk.
1234 ASSUMES these are set when the address is translated rather than on commit... */
1235 /** @todo testcase: check when A bits are actually set by the CPU for code. */
1236 PGMPTWALKFAST WalkFast;
1237 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
1238 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1239 &WalkFast);
1240 if (RT_SUCCESS(rc))
1241 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1242 else
1243 {
1244#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1245 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
1246 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
1247#endif
1248 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1249 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
1250 }
1251
1252 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1253 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
1254 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
1255 {
1256 pTlbe--;
1257 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
1258 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1259 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1260 }
1261 else
1262 {
1263 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
1264 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
1265 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1266 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1267 }
1268 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1269 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
1270 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
1271 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1272 pTlbe->GCPhys = GCPhysPg;
1273 pTlbe->pbMappingR3 = NULL;
1274 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1275 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1276 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1277
1278 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
1279 IEMTLBTRACE_LOAD( pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
1280 else
1281 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
1282
1283 /* Resolve the physical address. */
1284 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1285 { /* likely */ }
1286 else
1287 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1288 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1289 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1290 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1291 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1292 }
1293
1294# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1295 /*
1296 * Try do a direct read using the pbMappingR3 pointer.
1297 * Note! Do not recheck the physical TLB revision number here as we have the
1298 * wrong response to changes in the else case. If someone is updating
1299 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1300 * pretending we always won the race.
1301 */
1302 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1303 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1304 {
1305 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1306 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1307 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1308 {
1309 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1310 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1311 }
1312 else
1313 {
1314 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1315 if (cbInstr + (uint32_t)cbDst <= 15)
1316 {
1317 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1318 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1319 }
1320 else
1321 {
1322 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1323 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1324 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1325 }
1326 }
1327 if (cbDst <= cbMaxRead)
1328 {
1329 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1330 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1331
1332 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1333 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1334 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1335 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1336 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1337 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1338 else
1339 Assert(!pvDst);
1340 return;
1341 }
1342 pVCpu->iem.s.pbInstrBuf = NULL;
1343
1344 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1345 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1346 }
1347# else
1348# error "refactor as needed"
1349 /*
1350 * If there is no special read handling, we can read a bit more and
1351 * put it in the prefetch buffer.
1352 */
1353 if ( cbDst < cbMaxRead
1354 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1355 {
1356 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1357 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1358 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1359 { /* likely */ }
1360 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1361 {
1362 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1363 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1364 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1365 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1366 }
1367 else
1368 {
1369 Log((RT_SUCCESS(rcStrict)
1370 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1371 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1372 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1373 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1374 }
1375 }
1376# endif
1377 /*
1378 * Special read handling, so only read exactly what's needed.
1379 * This is a highly unlikely scenario.
1380 */
1381 else
1382 {
1383 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
1384
1385 /* Check instruction length. */
1386 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1387 if (RT_LIKELY(cbInstr + cbDst <= 15))
1388 { /* likely */ }
1389 else
1390 {
1391 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1392 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1393 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1394 }
1395
1396 /* Do the reading. */
1397 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1398 if (cbToRead > 0)
1399 {
1400 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1401 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1402 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1403 { /* likely */ }
1404 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1405 {
1406 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1407 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1408 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1409 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1410 }
1411 else
1412 {
1413 Log((RT_SUCCESS(rcStrict)
1414 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1415 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1416 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1417 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1418 }
1419 }
1420
1421 /* Update the state and probably return. */
1422 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1423 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1424 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1425
1426 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1427 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1428 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1429 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1430 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1431 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1432 pVCpu->iem.s.pbInstrBuf = NULL;
1433 if (cbToRead == cbDst)
1434 return;
1435 Assert(cbToRead == cbMaxRead);
1436 }
1437
1438 /*
1439 * More to read, loop.
1440 */
1441 cbDst -= cbMaxRead;
1442 pvDst = (uint8_t *)pvDst + cbMaxRead;
1443 }
1444# else /* !IN_RING3 */
1445 RT_NOREF(pvDst, cbDst);
1446 if (pvDst || cbDst)
1447 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1448# endif /* !IN_RING3 */
1449}
1450
1451#else /* !IEM_WITH_CODE_TLB */
1452
1453/**
1454 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1455 * exception if it fails.
1456 *
1457 * @returns Strict VBox status code.
1458 * @param pVCpu The cross context virtual CPU structure of the
1459 * calling thread.
1460 * @param cbMin The minimum number of bytes relative to offOpcode
1461 * that must be read.
1462 */
1463VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1464{
1465 /*
1466 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1467 *
1468 * First translate CS:rIP to a physical address.
1469 */
1470 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1471 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1472 uint8_t const cbLeft = cbOpcode - offOpcode;
1473 Assert(cbLeft < cbMin);
1474 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1475
1476 uint32_t cbToTryRead;
1477 RTGCPTR GCPtrNext;
1478 if (IEM_IS_64BIT_CODE(pVCpu))
1479 {
1480 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1481 if (!IEM_IS_CANONICAL(GCPtrNext))
1482 return iemRaiseGeneralProtectionFault0(pVCpu);
1483 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1484 }
1485 else
1486 {
1487 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1488 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1489 GCPtrNext32 += cbOpcode;
1490 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1491 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1492 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1493 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1494 if (!cbToTryRead) /* overflowed */
1495 {
1496 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1497 cbToTryRead = UINT32_MAX;
1498 /** @todo check out wrapping around the code segment. */
1499 }
1500 if (cbToTryRead < cbMin - cbLeft)
1501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1502 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1503
1504 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1505 if (cbToTryRead > cbLeftOnPage)
1506 cbToTryRead = cbLeftOnPage;
1507 }
1508
1509 /* Restrict to opcode buffer space.
1510
1511 We're making ASSUMPTIONS here based on work done previously in
1512 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1513 be fetched in case of an instruction crossing two pages. */
1514 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1515 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1516 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1517 { /* likely */ }
1518 else
1519 {
1520 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1521 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1522 return iemRaiseGeneralProtectionFault0(pVCpu);
1523 }
1524
1525 PGMPTWALKFAST WalkFast;
1526 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1527 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1528 &WalkFast);
1529 if (RT_SUCCESS(rc))
1530 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1531 else
1532 {
1533 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1534#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1535 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1536 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1537#endif
1538 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1539 }
1540 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1541 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1542
1543 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1544 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1545
1546 /*
1547 * Read the bytes at this address.
1548 *
1549 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1550 * and since PATM should only patch the start of an instruction there
1551 * should be no need to check again here.
1552 */
1553 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1554 {
1555 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1556 cbToTryRead, PGMACCESSORIGIN_IEM);
1557 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1558 { /* likely */ }
1559 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1560 {
1561 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1562 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1563 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1564 }
1565 else
1566 {
1567 Log((RT_SUCCESS(rcStrict)
1568 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1569 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1570 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1571 return rcStrict;
1572 }
1573 }
1574 else
1575 {
1576 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1577 if (RT_SUCCESS(rc))
1578 { /* likely */ }
1579 else
1580 {
1581 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1582 return rc;
1583 }
1584 }
1585 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1586 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1587
1588 return VINF_SUCCESS;
1589}
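/* Worked example of the read-size clamping above (illustrative only, assuming the usual
 * 4 KiB guest page size): with GCPtrNext = 0x100ffa the page clamp gives
 *     cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK) = 0x1000 - 0xffa = 6,
 * and cbToTryRead is additionally capped at the space remaining in abOpcode
 * (sizeof(abOpcode) - cbOpcode).  Only if the resulting cbToTryRead + cbLeft still covers
 * cbMin do we proceed; otherwise #GP(0) is raised above. */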
1590
1591#endif /* !IEM_WITH_CODE_TLB */
1592#ifndef IEM_WITH_SETJMP
1593
1594/**
1595 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1596 *
1597 * @returns Strict VBox status code.
1598 * @param pVCpu The cross context virtual CPU structure of the
1599 * calling thread.
1600 * @param pb Where to return the opcode byte.
1601 */
1602VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1603{
1604 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1605 if (rcStrict == VINF_SUCCESS)
1606 {
1607 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1608 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1609 pVCpu->iem.s.offOpcode = offOpcode + 1;
1610 }
1611 else
1612 *pb = 0;
1613 return rcStrict;
1614}
1615
1616#else /* IEM_WITH_SETJMP */
1617
1618/**
1619 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1620 *
1621 * @returns The opcode byte.
1622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1623 */
1624uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1625{
1626# ifdef IEM_WITH_CODE_TLB
1627 uint8_t u8;
1628 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1629 return u8;
1630# else
1631 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1632 if (rcStrict == VINF_SUCCESS)
1633 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1634 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1635# endif
1636}
1637
1638#endif /* IEM_WITH_SETJMP */
1639
1640#ifndef IEM_WITH_SETJMP
1641
1642/**
1643 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1644 *
1645 * @returns Strict VBox status code.
1646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1647 * @param pu16 Where to return the opcode byte, sign-extended to a word.
1648 */
1649VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1650{
1651 uint8_t u8;
1652 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1653 if (rcStrict == VINF_SUCCESS)
1654 *pu16 = (int8_t)u8;
1655 return rcStrict;
1656}
1657
1658
1659/**
1660 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1661 *
1662 * @returns Strict VBox status code.
1663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1664 * @param pu32 Where to return the opcode dword.
1665 */
1666VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1667{
1668 uint8_t u8;
1669 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1670 if (rcStrict == VINF_SUCCESS)
1671 *pu32 = (int8_t)u8;
1672 return rcStrict;
1673}
1674
1675
1676/**
1677 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1678 *
1679 * @returns Strict VBox status code.
1680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1681 * @param pu64 Where to return the opcode qword.
1682 */
1683VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1684{
1685 uint8_t u8;
1686 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1687 if (rcStrict == VINF_SUCCESS)
1688 *pu64 = (int8_t)u8;
1689 return rcStrict;
1690}
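/* Sign-extension behaviour shared by the three helpers above (illustrative): the fetched
 * byte is cast to int8_t and then widened, so e.g. 0x80 becomes 0xff80 (U16),
 * 0xffffff80 (U32) or 0xffffffffffffff80 (U64), while 0x7f stays 0x007f, 0x0000007f and
 * 0x000000000000007f respectively. */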
1691
1692#endif /* !IEM_WITH_SETJMP */
1693
1694
1695#ifndef IEM_WITH_SETJMP
1696
1697/**
1698 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1699 *
1700 * @returns Strict VBox status code.
1701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1702 * @param pu16 Where to return the opcode word.
1703 */
1704VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1705{
1706 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1707 if (rcStrict == VINF_SUCCESS)
1708 {
1709 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1710# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1711 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1712# else
1713 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1714# endif
1715 pVCpu->iem.s.offOpcode = offOpcode + 2;
1716 }
1717 else
1718 *pu16 = 0;
1719 return rcStrict;
1720}
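/* Byte-order note for the RT_MAKE_U16/U32/U64 paths used by these slow fetchers
 * (illustrative): opcode bytes are assembled little-endian, so abOpcode[] = { 0x34, 0x12 }
 * yields the word 0x1234.  The IEM_USE_UNALIGNED_DATA_ACCESS variant relies on the host
 * tolerating unaligned loads and reads the same value in a single access. */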
1721
1722#else /* IEM_WITH_SETJMP */
1723
1724/**
1725 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1726 *
1727 * @returns The opcode word.
1728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1729 */
1730uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1731{
1732# ifdef IEM_WITH_CODE_TLB
1733 uint16_t u16;
1734 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1735 return u16;
1736# else
1737 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1738 if (rcStrict == VINF_SUCCESS)
1739 {
1740 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1741 pVCpu->iem.s.offOpcode += 2;
1742# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1743 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1744# else
1745 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1746# endif
1747 }
1748 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1749# endif
1750}
1751
1752#endif /* IEM_WITH_SETJMP */
1753
1754#ifndef IEM_WITH_SETJMP
1755
1756/**
1757 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1758 *
1759 * @returns Strict VBox status code.
1760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1761 * @param pu32 Where to return the opcode double word.
1762 */
1763VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1764{
1765 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1766 if (rcStrict == VINF_SUCCESS)
1767 {
1768 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1769 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1770 pVCpu->iem.s.offOpcode = offOpcode + 2;
1771 }
1772 else
1773 *pu32 = 0;
1774 return rcStrict;
1775}
1776
1777
1778/**
1779 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1780 *
1781 * @returns Strict VBox status code.
1782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1783 * @param pu64 Where to return the opcode quad word.
1784 */
1785VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1786{
1787 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1788 if (rcStrict == VINF_SUCCESS)
1789 {
1790 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1791 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1792 pVCpu->iem.s.offOpcode = offOpcode + 2;
1793 }
1794 else
1795 *pu64 = 0;
1796 return rcStrict;
1797}
1798
1799#endif /* !IEM_WITH_SETJMP */
1800
1801#ifndef IEM_WITH_SETJMP
1802
1803/**
1804 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1805 *
1806 * @returns Strict VBox status code.
1807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1808 * @param pu32 Where to return the opcode dword.
1809 */
1810VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1811{
1812 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1813 if (rcStrict == VINF_SUCCESS)
1814 {
1815 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1816# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1817 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1818# else
1819 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1820 pVCpu->iem.s.abOpcode[offOpcode + 1],
1821 pVCpu->iem.s.abOpcode[offOpcode + 2],
1822 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1823# endif
1824 pVCpu->iem.s.offOpcode = offOpcode + 4;
1825 }
1826 else
1827 *pu32 = 0;
1828 return rcStrict;
1829}
1830
1831#else /* IEM_WITH_SETJMP */
1832
1833/**
1834 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1835 *
1836 * @returns The opcode dword.
1837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1838 */
1839uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1840{
1841# ifdef IEM_WITH_CODE_TLB
1842 uint32_t u32;
1843 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1844 return u32;
1845# else
1846 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1847 if (rcStrict == VINF_SUCCESS)
1848 {
1849 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1850 pVCpu->iem.s.offOpcode = offOpcode + 4;
1851# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1852 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1853# else
1854 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1855 pVCpu->iem.s.abOpcode[offOpcode + 1],
1856 pVCpu->iem.s.abOpcode[offOpcode + 2],
1857 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1858# endif
1859 }
1860 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1861# endif
1862}
1863
1864#endif /* IEM_WITH_SETJMP */
1865
1866#ifndef IEM_WITH_SETJMP
1867
1868/**
1869 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1870 *
1871 * @returns Strict VBox status code.
1872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1873 * @param pu64 Where to return the opcode dword, zero-extended to a qword.
1874 */
1875VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1876{
1877 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1878 if (rcStrict == VINF_SUCCESS)
1879 {
1880 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1881 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1882 pVCpu->iem.s.abOpcode[offOpcode + 1],
1883 pVCpu->iem.s.abOpcode[offOpcode + 2],
1884 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1885 pVCpu->iem.s.offOpcode = offOpcode + 4;
1886 }
1887 else
1888 *pu64 = 0;
1889 return rcStrict;
1890}
1891
1892
1893/**
1894 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1895 *
1896 * @returns Strict VBox status code.
1897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1898 * @param pu64 Where to return the opcode qword.
1899 */
1900VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1901{
1902 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1903 if (rcStrict == VINF_SUCCESS)
1904 {
1905 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1906 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1907 pVCpu->iem.s.abOpcode[offOpcode + 1],
1908 pVCpu->iem.s.abOpcode[offOpcode + 2],
1909 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1910 pVCpu->iem.s.offOpcode = offOpcode + 4;
1911 }
1912 else
1913 *pu64 = 0;
1914 return rcStrict;
1915}
1916
1917#endif /* !IEM_WITH_SETJMP */
1918
1919#ifndef IEM_WITH_SETJMP
1920
1921/**
1922 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1923 *
1924 * @returns Strict VBox status code.
1925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1926 * @param pu64 Where to return the opcode qword.
1927 */
1928VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1929{
1930 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1931 if (rcStrict == VINF_SUCCESS)
1932 {
1933 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1934# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1935 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1936# else
1937 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1938 pVCpu->iem.s.abOpcode[offOpcode + 1],
1939 pVCpu->iem.s.abOpcode[offOpcode + 2],
1940 pVCpu->iem.s.abOpcode[offOpcode + 3],
1941 pVCpu->iem.s.abOpcode[offOpcode + 4],
1942 pVCpu->iem.s.abOpcode[offOpcode + 5],
1943 pVCpu->iem.s.abOpcode[offOpcode + 6],
1944 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1945# endif
1946 pVCpu->iem.s.offOpcode = offOpcode + 8;
1947 }
1948 else
1949 *pu64 = 0;
1950 return rcStrict;
1951}
1952
1953#else /* IEM_WITH_SETJMP */
1954
1955/**
1956 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1957 *
1958 * @returns The opcode qword.
1959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1960 */
1961uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1962{
1963# ifdef IEM_WITH_CODE_TLB
1964 uint64_t u64;
1965 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1966 return u64;
1967# else
1968 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1969 if (rcStrict == VINF_SUCCESS)
1970 {
1971 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1972 pVCpu->iem.s.offOpcode = offOpcode + 8;
1973# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1974 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1975# else
1976 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1977 pVCpu->iem.s.abOpcode[offOpcode + 1],
1978 pVCpu->iem.s.abOpcode[offOpcode + 2],
1979 pVCpu->iem.s.abOpcode[offOpcode + 3],
1980 pVCpu->iem.s.abOpcode[offOpcode + 4],
1981 pVCpu->iem.s.abOpcode[offOpcode + 5],
1982 pVCpu->iem.s.abOpcode[offOpcode + 6],
1983 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1984# endif
1985 }
1986 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1987# endif
1988}
1989
1990#endif /* IEM_WITH_SETJMP */
1991
1992
1993
1994/** @name Misc Worker Functions.
1995 * @{
1996 */
1997
1998/**
1999 * Gets the exception class for the specified exception vector.
2000 *
2001 * @returns The class of the specified exception.
2002 * @param uVector The exception vector.
2003 */
2004static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
2005{
2006 Assert(uVector <= X86_XCPT_LAST);
2007 switch (uVector)
2008 {
2009 case X86_XCPT_DE:
2010 case X86_XCPT_TS:
2011 case X86_XCPT_NP:
2012 case X86_XCPT_SS:
2013 case X86_XCPT_GP:
2014 case X86_XCPT_SX: /* AMD only */
2015 return IEMXCPTCLASS_CONTRIBUTORY;
2016
2017 case X86_XCPT_PF:
2018 case X86_XCPT_VE: /* Intel only */
2019 return IEMXCPTCLASS_PAGE_FAULT;
2020
2021 case X86_XCPT_DF:
2022 return IEMXCPTCLASS_DOUBLE_FAULT;
2023 }
2024 return IEMXCPTCLASS_BENIGN;
2025}
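/* Quick reference for the classification above (matching the switch and the Intel/AMD
 * interrupt chapters): #DE, #TS, #NP, #SS, #GP (and AMD's #SX) are contributory, #PF and
 * Intel's #VE form the page-fault class, #DF is its own class, and everything else
 * (#DB, #BP, #UD, #NM, #MF, #AC, #MC, ...) is treated as benign for double-fault
 * purposes. */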
2026
2027
2028/**
2029 * Evaluates how to handle an exception caused during delivery of another event
2030 * (exception / interrupt).
2031 *
2032 * @returns How to handle the recursive exception.
2033 * @param pVCpu The cross context virtual CPU structure of the
2034 * calling thread.
2035 * @param fPrevFlags The flags of the previous event.
2036 * @param uPrevVector The vector of the previous event.
2037 * @param fCurFlags The flags of the current exception.
2038 * @param uCurVector The vector of the current exception.
2039 * @param pfXcptRaiseInfo Where to store additional information about the
2040 * exception condition. Optional.
2041 */
2042VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
2043 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
2044{
2045 /*
2046 * Only CPU exceptions can be raised while delivering other events; software-interrupt
2047 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
2048 */
2049 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
2050 Assert(pVCpu); RT_NOREF(pVCpu);
2051 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
2052
2053 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
2054 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
2055 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2056 {
2057 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
2058 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
2059 {
2060 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
2061 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
2062 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
2063 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
2064 {
2065 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2066 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
2067 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
2068 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
2069 uCurVector, pVCpu->cpum.GstCtx.cr2));
2070 }
2071 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2072 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
2073 {
2074 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2075 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
2076 }
2077 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
2078 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2079 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
2080 {
2081 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
2082 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
2083 }
2084 }
2085 else
2086 {
2087 if (uPrevVector == X86_XCPT_NMI)
2088 {
2089 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
2090 if (uCurVector == X86_XCPT_PF)
2091 {
2092 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
2093 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
2094 }
2095 }
2096 else if ( uPrevVector == X86_XCPT_AC
2097 && uCurVector == X86_XCPT_AC)
2098 {
2099 enmRaise = IEMXCPTRAISE_CPU_HANG;
2100 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
2101 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
2102 }
2103 }
2104 }
2105 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
2106 {
2107 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
2108 if (uCurVector == X86_XCPT_PF)
2109 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
2110 }
2111 else
2112 {
2113 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
2114 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
2115 }
2116
2117 if (pfXcptRaiseInfo)
2118 *pfXcptRaiseInfo = fRaiseInfo;
2119 return enmRaise;
2120}
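#if 0 /* Illustrative sketch only (not used anywhere; assumes a caller where pVCpu is in
       * scope): classifying a #GP raised while a #NP was being delivered.  Both vectors
       * are contributory, so the function returns IEMXCPTRAISE_DOUBLE_FAULT. */
    IEMXCPTRAISEINFO fRaiseInfo;
    IEMXCPTRAISE const enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_NP, /* previous event */
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, /* current exception */
                                                           &fRaiseInfo);
    Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
#endif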
2121
2122
2123/**
2124 * Enters the CPU shutdown state initiated by a triple fault or other
2125 * unrecoverable conditions.
2126 *
2127 * @returns Strict VBox status code.
2128 * @param pVCpu The cross context virtual CPU structure of the
2129 * calling thread.
2130 */
2131static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
2132{
2133 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2134 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
2135
2136 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
2137 {
2138 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
2139 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2140 }
2141
2142 RT_NOREF(pVCpu);
2143 return VINF_EM_TRIPLE_FAULT;
2144}
2145
2146
2147/**
2148 * Validates a new SS segment.
2149 *
2150 * @returns VBox strict status code.
2151 * @param pVCpu The cross context virtual CPU structure of the
2152 * calling thread.
2153 * @param NewSS The new SS selector.
2154 * @param uCpl The CPL to load the stack for.
2155 * @param pDesc Where to return the descriptor.
2156 */
2157static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
2158{
2159 /* Null selectors are not allowed (we're not called for dispatching
2160 interrupts with SS=0 in long mode). */
2161 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2162 {
2163 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2164 return iemRaiseTaskSwitchFault0(pVCpu);
2165 }
2166
2167 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2168 if ((NewSS & X86_SEL_RPL) != uCpl)
2169 {
2170 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2171 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2172 }
2173
2174 /*
2175 * Read the descriptor.
2176 */
2177 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2178 if (rcStrict != VINF_SUCCESS)
2179 return rcStrict;
2180
2181 /*
2182 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2183 */
2184 if (!pDesc->Legacy.Gen.u1DescType)
2185 {
2186 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2187 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2188 }
2189
2190 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2191 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2192 {
2193 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2194 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2195 }
2196 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2197 {
2198 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2199 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2200 }
2201
2202 /* Is it there? */
2203 /** @todo testcase: Is this checked before the canonical / limit check below? */
2204 if (!pDesc->Legacy.Gen.u1Present)
2205 {
2206 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2207 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2208 }
2209
2210 return VINF_SUCCESS;
2211}
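#if 0 /* Illustrative sketch only (assumes a caller where pVCpu, NewSS and uCpl are in
       * scope): typical use of the validator above when switching stacks.  On success
       * DescSS describes a present, writable data segment whose DPL equals uCpl; the
       * RPL check is done against the selector itself. */
    IEMSELDESC DescSS;
    VBOXSTRICTRC rcStrict2 = iemMiscValidateNewSS(pVCpu, NewSS, uCpl, &DescSS);
    if (rcStrict2 != VINF_SUCCESS)
        return rcStrict2; /* #TS or #NP already selected by the helper. */
#endif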
2212
2213/** @} */
2214
2215
2216/** @name Raising Exceptions.
2217 *
2218 * @{
2219 */
2220
2221
2222/**
2223 * Loads the specified stack far pointer from the TSS.
2224 *
2225 * @returns VBox strict status code.
2226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2227 * @param uCpl The CPL to load the stack for.
2228 * @param pSelSS Where to return the new stack segment.
2229 * @param puEsp Where to return the new stack pointer.
2230 */
2231static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
2232{
2233 VBOXSTRICTRC rcStrict;
2234 Assert(uCpl < 4);
2235
2236 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2237 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
2238 {
2239 /*
2240 * 16-bit TSS (X86TSS16).
2241 */
2242 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2243 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2244 {
2245 uint32_t off = uCpl * 4 + 2;
2246 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2247 {
2248 /** @todo check actual access pattern here. */
2249 uint32_t u32Tmp = 0; /* gcc maybe... */
2250 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2251 if (rcStrict == VINF_SUCCESS)
2252 {
2253 *puEsp = RT_LOWORD(u32Tmp);
2254 *pSelSS = RT_HIWORD(u32Tmp);
2255 return VINF_SUCCESS;
2256 }
2257 }
2258 else
2259 {
2260 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2261 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2262 }
2263 break;
2264 }
2265
2266 /*
2267 * 32-bit TSS (X86TSS32).
2268 */
2269 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2270 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2271 {
2272 uint32_t off = uCpl * 8 + 4;
2273 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2274 {
2275/** @todo check actual access pattern here. */
2276 uint64_t u64Tmp;
2277 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2278 if (rcStrict == VINF_SUCCESS)
2279 {
2280 *puEsp = u64Tmp & UINT32_MAX;
2281 *pSelSS = (RTSEL)(u64Tmp >> 32);
2282 return VINF_SUCCESS;
2283 }
2284 }
2285 else
2286 {
2287 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2288 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2289 }
2290 break;
2291 }
2292
2293 default:
2294 AssertFailed();
2295 rcStrict = VERR_IEM_IPE_4;
2296 break;
2297 }
2298
2299 *puEsp = 0; /* make gcc happy */
2300 *pSelSS = 0; /* make gcc happy */
2301 return rcStrict;
2302}
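/* Offset arithmetic used above, spelled out (illustrative): the 16-bit TSS keeps the
 * sp0/ss0..sp2/ss2 pairs as 2+2 byte entries starting at offset 2, hence off = uCpl * 4 + 2
 * (CPL 1 -> 6); the 32-bit TSS keeps esp0/ss0..esp2/ss2 as 4+4 byte entries starting at
 * offset 4, hence off = uCpl * 8 + 4 (CPL 1 -> 12).  The 32-bit variant fetches both
 * fields with one 64-bit read and splits it into ESP (the low dword) and SS (taken from
 * the high dword). */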
2303
2304
2305/**
2306 * Loads the specified stack pointer from the 64-bit TSS.
2307 *
2308 * @returns VBox strict status code.
2309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2310 * @param uCpl The CPL to load the stack for.
2311 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2312 * @param puRsp Where to return the new stack pointer.
2313 */
2314static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2315{
2316 Assert(uCpl < 4);
2317 Assert(uIst < 8);
2318 *puRsp = 0; /* make gcc happy */
2319
2320 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2321 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2322
2323 uint32_t off;
2324 if (uIst)
2325 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2326 else
2327 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2328 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2329 {
2330 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2331 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2332 }
2333
2334 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2335}
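/* Same idea for the 64-bit TSS (illustrative): rsp0..rsp2 live at offsets 4/12/20 and
 * ist1..ist7 at offsets 36..84, so uIst == 0 selects rsp<uCpl> while a non-zero uIst
 * selects the corresponding IST slot regardless of CPL, exactly as the off computation
 * above encodes. */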
2336
2337
2338/**
2339 * Adjust the CPU state according to the exception being raised.
2340 *
2341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2342 * @param u8Vector The exception that has been raised.
2343 */
2344DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2345{
2346 switch (u8Vector)
2347 {
2348 case X86_XCPT_DB:
2349 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2350 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2351 break;
2352 /** @todo Read the AMD and Intel exception reference... */
2353 }
2354}
2355
2356
2357/**
2358 * Implements exceptions and interrupts for real mode.
2359 *
2360 * @returns VBox strict status code.
2361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2362 * @param cbInstr The number of bytes to offset rIP by in the return
2363 * address.
2364 * @param u8Vector The interrupt / exception vector number.
2365 * @param fFlags The flags.
2366 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2367 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2368 */
2369static VBOXSTRICTRC
2370iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2371 uint8_t cbInstr,
2372 uint8_t u8Vector,
2373 uint32_t fFlags,
2374 uint16_t uErr,
2375 uint64_t uCr2) RT_NOEXCEPT
2376{
2377 NOREF(uErr); NOREF(uCr2);
2378 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2379
2380 /*
2381 * Read the IDT entry.
2382 */
2383 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2384 {
2385 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2386 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2387 }
2388 RTFAR16 Idte;
2389 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2390 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2391 {
2392 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2393 return rcStrict;
2394 }
2395
2396#ifdef LOG_ENABLED
2397 /* If it's a software interrupt, try to decode it if logging is enabled and such. */
2398 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2399 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2400 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2401#endif
2402
2403 /*
2404 * Push the stack frame.
2405 */
2406 uint8_t bUnmapInfo;
2407 uint16_t *pu16Frame;
2408 uint64_t uNewRsp;
2409 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2410 if (rcStrict != VINF_SUCCESS)
2411 return rcStrict;
2412
2413 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2414#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2415 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2416 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2417 fEfl |= UINT16_C(0xf000);
2418#endif
2419 pu16Frame[2] = (uint16_t)fEfl;
2420 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2421 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2422 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2423 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2424 return rcStrict;
2425
2426 /*
2427 * Load the vector address into cs:ip and make exception specific state
2428 * adjustments.
2429 */
2430 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2431 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2432 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2433 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2434 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2435 pVCpu->cpum.GstCtx.rip = Idte.off;
2436 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2437 IEMMISC_SET_EFL(pVCpu, fEfl);
2438
2439 /** @todo do we actually do this in real mode? */
2440 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2441 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2442
2443 /*
2444 * Deal with debug events that follow the exception and clear inhibit flags.
2445 */
2446 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2447 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2448 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2449 else
2450 {
2451 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
2452 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2453 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2454 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2455 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2456 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2457 return iemRaiseDebugException(pVCpu);
2458 }
2459
2460 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2461 so best leave them alone in case we're in a weird kind of real mode... */
2462
2463 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2464}
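/* Real-mode vectoring in a nutshell (illustrative): the IDTR points at the real-mode
 * interrupt vector table (normally 1 KB), entry n being a far16 pointer at pIdt + n * 4,
 * e.g. INT 21h -> offset 0x84.  The handler sees the 3-word frame pushed above, FLAGS,
 * CS, IP, with IP already advanced past the INT instruction for software interrupts. */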
2465
2466
2467/**
2468 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2469 *
2470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2471 * @param pSReg Pointer to the segment register.
2472 */
2473DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2474{
2475 pSReg->Sel = 0;
2476 pSReg->ValidSel = 0;
2477 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2478 {
2479 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2480 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2481 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2482 }
2483 else
2484 {
2485 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2486 /** @todo check this on AMD-V */
2487 pSReg->u64Base = 0;
2488 pSReg->u32Limit = 0;
2489 }
2490}
2491
2492
2493/**
2494 * Loads a segment selector during a task switch in V8086 mode.
2495 *
2496 * @param pSReg Pointer to the segment register.
2497 * @param uSel The selector value to load.
2498 */
2499DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2500{
2501 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2502 pSReg->Sel = uSel;
2503 pSReg->ValidSel = uSel;
2504 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2505 pSReg->u64Base = uSel << 4;
2506 pSReg->u32Limit = 0xffff;
2507 pSReg->Attr.u = 0xf3;
2508}
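/* V8086 segment semantics encoded above (illustrative): base = selector * 16, limit
 * 0xffff, and attributes 0xf3 = present, DPL 3, read/write accessed data.  Loading
 * 0x2000, for example, yields a base of 0x20000. */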
2509
2510
2511/**
2512 * Loads a segment selector during a task switch in protected mode.
2513 *
2514 * In this task switch scenario, we would throw \#TS exceptions rather than
2515 * \#GPs.
2516 *
2517 * @returns VBox strict status code.
2518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2519 * @param pSReg Pointer to the segment register.
2520 * @param uSel The new selector value.
2521 *
2522 * @remarks This does _not_ handle CS or SS.
2523 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2524 */
2525static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2526{
2527 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2528
2529 /* Null data selector. */
2530 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2531 {
2532 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2533 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2534 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2535 return VINF_SUCCESS;
2536 }
2537
2538 /* Fetch the descriptor. */
2539 IEMSELDESC Desc;
2540 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2541 if (rcStrict != VINF_SUCCESS)
2542 {
2543 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2544 VBOXSTRICTRC_VAL(rcStrict)));
2545 return rcStrict;
2546 }
2547
2548 /* Must be a data segment or readable code segment. */
2549 if ( !Desc.Legacy.Gen.u1DescType
2550 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2551 {
2552 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2553 Desc.Legacy.Gen.u4Type));
2554 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2555 }
2556
2557 /* Check privileges for data segments and non-conforming code segments. */
2558 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2559 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2560 {
2561 /* The RPL and the new CPL must be less than or equal to the DPL. */
2562 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2563 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2564 {
2565 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2566 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2567 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2568 }
2569 }
2570
2571 /* Is it there? */
2572 if (!Desc.Legacy.Gen.u1Present)
2573 {
2574 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2575 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2576 }
2577
2578 /* The base and limit. */
2579 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2580 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2581
2582 /*
2583 * Ok, everything checked out fine. Now set the accessed bit before
2584 * committing the result into the registers.
2585 */
2586 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2587 {
2588 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2589 if (rcStrict != VINF_SUCCESS)
2590 return rcStrict;
2591 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2592 }
2593
2594 /* Commit */
2595 pSReg->Sel = uSel;
2596 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2597 pSReg->u32Limit = cbLimit;
2598 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2599 pSReg->ValidSel = uSel;
2600 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2601 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2602 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2603
2604 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2605 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2606 return VINF_SUCCESS;
2607}
2608
2609
2610/**
2611 * Performs a task switch.
2612 *
2613 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2614 * caller is responsible for performing the necessary checks (like DPL, TSS
2615 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2616 * reference for JMP, CALL, IRET.
2617 *
2618 * If the task switch is due to a software interrupt or hardware exception,
2619 * the caller is responsible for validating the TSS selector and descriptor. See
2620 * Intel Instruction reference for INT n.
2621 *
2622 * @returns VBox strict status code.
2623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2624 * @param enmTaskSwitch The cause of the task switch.
2625 * @param uNextEip The EIP effective after the task switch.
2626 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2627 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2628 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2629 * @param SelTss The TSS selector of the new task.
2630 * @param pNewDescTss Pointer to the new TSS descriptor.
2631 */
2632VBOXSTRICTRC
2633iemTaskSwitch(PVMCPUCC pVCpu,
2634 IEMTASKSWITCH enmTaskSwitch,
2635 uint32_t uNextEip,
2636 uint32_t fFlags,
2637 uint16_t uErr,
2638 uint64_t uCr2,
2639 RTSEL SelTss,
2640 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2641{
2642 Assert(!IEM_IS_REAL_MODE(pVCpu));
2643 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2644 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2645
2646 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2647 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2648 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2649 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2650 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2651
2652 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2653 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2654
2655 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2656 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2657
2658 /* Update CR2 in case it's a page-fault. */
2659 /** @todo This should probably be done much earlier in IEM/PGM. See
2660 * @bugref{5653#c49}. */
2661 if (fFlags & IEM_XCPT_FLAGS_CR2)
2662 pVCpu->cpum.GstCtx.cr2 = uCr2;
2663
2664 /*
2665 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2666 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2667 */
2668 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2669 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2670 if (uNewTssLimit < uNewTssLimitMin)
2671 {
2672 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2673 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2674 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2675 }
2676
2677 /*
2678 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2679 * The new TSS must have been read and validated (DPL, limits etc.) before a
2680 * task-switch VM-exit commences.
2681 *
2682 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2683 */
2684 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2685 {
2686 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2687 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2688 }
2689
2690 /*
2691 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2692 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2693 */
2694 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2695 {
2696 uint64_t const uExitInfo1 = SelTss;
2697 uint64_t uExitInfo2 = uErr;
2698 switch (enmTaskSwitch)
2699 {
2700 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2701 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2702 default: break;
2703 }
2704 if (fFlags & IEM_XCPT_FLAGS_ERR)
2705 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2706 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2707 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2708
2709 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2710 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2711 RT_NOREF2(uExitInfo1, uExitInfo2);
2712 }
2713
2714 /*
2715 * Check the current TSS limit. The last write to the current TSS during the
2716 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2717 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2718 *
2719 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2720 * end up with smaller than "legal" TSS limits.
2721 */
2722 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2723 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2724 if (uCurTssLimit < uCurTssLimitMin)
2725 {
2726 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2727 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2728 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2729 }
2730
2731 /*
2732 * Verify that the new TSS can be accessed and map it. Map only the required contents
2733 * and not the entire TSS.
2734 */
2735 uint8_t bUnmapInfoNewTss;
2736 void *pvNewTss;
2737 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2738 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2739 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2740 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2741 * not perform correct translation if this happens. See Intel spec. 7.2.1
2742 * "Task-State Segment". */
2743 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2744/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2745 * Consider wrapping the remainder into a function for simpler cleanup. */
2746 if (rcStrict != VINF_SUCCESS)
2747 {
2748 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2749 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2750 return rcStrict;
2751 }
2752
2753 /*
2754 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2755 */
2756 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2757 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2758 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2759 {
2760 uint8_t bUnmapInfoDescCurTss;
2761 PX86DESC pDescCurTss;
2762 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2763 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2764 if (rcStrict != VINF_SUCCESS)
2765 {
2766 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2767 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2768 return rcStrict;
2769 }
2770
2771 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2772 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2773 if (rcStrict != VINF_SUCCESS)
2774 {
2775 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2776 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2777 return rcStrict;
2778 }
2779
2780 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2781 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2782 {
2783 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2784 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2785 fEFlags &= ~X86_EFL_NT;
2786 }
2787 }
2788
2789 /*
2790 * Save the CPU state into the current TSS.
2791 */
2792 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2793 if (GCPtrNewTss == GCPtrCurTss)
2794 {
2795 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2796 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2797 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2798 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2799 pVCpu->cpum.GstCtx.ldtr.Sel));
2800 }
2801 if (fIsNewTss386)
2802 {
2803 /*
2804 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2805 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2806 */
2807 uint8_t bUnmapInfoCurTss32;
2808 void *pvCurTss32;
2809 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2810 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2811 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2812 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2813 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2814 if (rcStrict != VINF_SUCCESS)
2815 {
2816 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2817 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2818 return rcStrict;
2819 }
2820
2821 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2822 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2823 pCurTss32->eip = uNextEip;
2824 pCurTss32->eflags = fEFlags;
2825 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2826 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2827 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2828 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2829 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2830 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2831 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2832 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2833 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2834 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2835 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2836 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2837 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2838 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2839
2840 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2841 if (rcStrict != VINF_SUCCESS)
2842 {
2843 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2844 VBOXSTRICTRC_VAL(rcStrict)));
2845 return rcStrict;
2846 }
2847 }
2848 else
2849 {
2850 /*
2851 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2852 */
2853 uint8_t bUnmapInfoCurTss16;
2854 void *pvCurTss16;
2855 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2856 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2857 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2858 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2859 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2860 if (rcStrict != VINF_SUCCESS)
2861 {
2862 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2863 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2864 return rcStrict;
2865 }
2866
2867 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2868 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2869 pCurTss16->ip = uNextEip;
2870 pCurTss16->flags = (uint16_t)fEFlags;
2871 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2872 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2873 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2874 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2875 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2876 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2877 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2878 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2879 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2880 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2881 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2882 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2883
2884 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2885 if (rcStrict != VINF_SUCCESS)
2886 {
2887 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2888 VBOXSTRICTRC_VAL(rcStrict)));
2889 return rcStrict;
2890 }
2891 }
2892
2893 /*
2894 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2895 */
2896 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2897 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2898 {
2899 /* 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
2900 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2901 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2902 }
2903
2904 /*
2905 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
2906 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2907 */
2908 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2909 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2910 bool fNewDebugTrap;
2911 if (fIsNewTss386)
2912 {
2913 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2914 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2915 uNewEip = pNewTss32->eip;
2916 uNewEflags = pNewTss32->eflags;
2917 uNewEax = pNewTss32->eax;
2918 uNewEcx = pNewTss32->ecx;
2919 uNewEdx = pNewTss32->edx;
2920 uNewEbx = pNewTss32->ebx;
2921 uNewEsp = pNewTss32->esp;
2922 uNewEbp = pNewTss32->ebp;
2923 uNewEsi = pNewTss32->esi;
2924 uNewEdi = pNewTss32->edi;
2925 uNewES = pNewTss32->es;
2926 uNewCS = pNewTss32->cs;
2927 uNewSS = pNewTss32->ss;
2928 uNewDS = pNewTss32->ds;
2929 uNewFS = pNewTss32->fs;
2930 uNewGS = pNewTss32->gs;
2931 uNewLdt = pNewTss32->selLdt;
2932 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2933 }
2934 else
2935 {
2936 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2937 uNewCr3 = 0;
2938 uNewEip = pNewTss16->ip;
2939 uNewEflags = pNewTss16->flags;
2940 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2941 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2942 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2943 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2944 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2945 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2946 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2947 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2948 uNewES = pNewTss16->es;
2949 uNewCS = pNewTss16->cs;
2950 uNewSS = pNewTss16->ss;
2951 uNewDS = pNewTss16->ds;
2952 uNewFS = 0;
2953 uNewGS = 0;
2954 uNewLdt = pNewTss16->selLdt;
2955 fNewDebugTrap = false;
2956 }
2957
2958 if (GCPtrNewTss == GCPtrCurTss)
2959 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2960 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2961
2962 /*
2963 * We're done accessing the new TSS.
2964 */
2965 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2966 if (rcStrict != VINF_SUCCESS)
2967 {
2968 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2969 return rcStrict;
2970 }
2971
2972 /*
2973 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2974 */
2975 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2976 {
2977 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2978 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2979 if (rcStrict != VINF_SUCCESS)
2980 {
2981 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2982 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2983 return rcStrict;
2984 }
2985
2986 /* Check that the descriptor indicates the new TSS is available (not busy). */
2987 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2988 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2989 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2990
2991 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2992 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2993 if (rcStrict != VINF_SUCCESS)
2994 {
2995 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2996 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2997 return rcStrict;
2998 }
2999 }
3000
3001 /*
3002 * From this point on, we're technically in the new task.  Exceptions raised here are deferred
3003 * until the task switch has completed, but are delivered before any instruction in the new task executes.
3004 */
3005 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
3006 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
3007 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3008 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
3009 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
3010 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
3011 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3012
3013 /* Set the busy bit in TR. */
3014 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3015
3016 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3017 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3018 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3019 {
3020 uNewEflags |= X86_EFL_NT;
3021 }
3022
3023 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3024 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
3025 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3026
3027 pVCpu->cpum.GstCtx.eip = uNewEip;
3028 pVCpu->cpum.GstCtx.eax = uNewEax;
3029 pVCpu->cpum.GstCtx.ecx = uNewEcx;
3030 pVCpu->cpum.GstCtx.edx = uNewEdx;
3031 pVCpu->cpum.GstCtx.ebx = uNewEbx;
3032 pVCpu->cpum.GstCtx.esp = uNewEsp;
3033 pVCpu->cpum.GstCtx.ebp = uNewEbp;
3034 pVCpu->cpum.GstCtx.esi = uNewEsi;
3035 pVCpu->cpum.GstCtx.edi = uNewEdi;
3036
3037 uNewEflags &= X86_EFL_LIVE_MASK;
3038 uNewEflags |= X86_EFL_RA1_MASK;
3039 IEMMISC_SET_EFL(pVCpu, uNewEflags);
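    /*
     * For illustration: the X86_EFL_LIVE_MASK AND above drops any reserved bits from the
     * eflags image in the TSS, while X86_EFL_RA1_MASK forces the always-one flag bit
     * (bit 1), so e.g. an all-zero eflags field would still be loaded as 0x00000002.
     */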
3040
3041 /*
3042 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3043 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3044 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
3045 */
3046 pVCpu->cpum.GstCtx.es.Sel = uNewES;
3047 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
3048
3049 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3050 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
3051
3052 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3053 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
3054
3055 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
3056 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
3057
3058 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
3059 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
3060
3061 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
3062 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
3063 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3064
3065 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
3066 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3067 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
3068 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3069
3070 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3071 {
3072 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
3073 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
3074 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
3075 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
3076 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
3077 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
3078 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3079 }
3080
3081 /*
3082 * Switch CR3 for the new task.
3083 */
3084 if ( fIsNewTss386
3085 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
3086 {
3087 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3088 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3089 AssertRCSuccessReturn(rc, rc);
3090
3091 /* Inform PGM. */
3092 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
3093 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
3094 AssertRCReturn(rc, rc);
3095 /* ignore informational status codes */
3096
3097 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3098 }
3099
3100 /*
3101 * Switch LDTR for the new task.
3102 */
3103 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3104 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
3105 else
3106 {
3107 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3108
3109 IEMSELDESC DescNewLdt;
3110 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3111 if (rcStrict != VINF_SUCCESS)
3112 {
3113 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3114 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3115 return rcStrict;
3116 }
3117 if ( !DescNewLdt.Legacy.Gen.u1Present
3118 || DescNewLdt.Legacy.Gen.u1DescType
3119 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3120 {
3121 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3122 uNewLdt, DescNewLdt.Legacy.u));
3123 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3124 }
3125
3126 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
3127 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3128 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3129 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3130 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3131 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3132 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3133 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
3134 }
3135
3136 IEMSELDESC DescSS;
3137 if (IEM_IS_V86_MODE(pVCpu))
3138 {
3139 IEM_SET_CPL(pVCpu, 3);
3140 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
3141 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
3142 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
3143 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
3144 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
3145 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
3146
3147 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
3148 DescSS.Legacy.u = 0;
3149 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
3150 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
3151 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
3152 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
3153 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
3154 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3155 DescSS.Legacy.Gen.u2Dpl = 3;
3156 }
3157 else
3158 {
3159 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
3160
3161 /*
3162 * Load the stack segment for the new task.
3163 */
3164 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3165 {
3166 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3167 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3168 }
3169
3170 /* Fetch the descriptor. */
3171 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3172 if (rcStrict != VINF_SUCCESS)
3173 {
3174 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3175 VBOXSTRICTRC_VAL(rcStrict)));
3176 return rcStrict;
3177 }
3178
3179 /* SS must be a data segment and writable. */
3180 if ( !DescSS.Legacy.Gen.u1DescType
3181 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3182 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3183 {
3184 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3185 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3186 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3187 }
3188
3189 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3190 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3191 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3192 {
3193 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3194 uNewCpl));
3195 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3196 }
3197
3198 /* Is it there? */
3199 if (!DescSS.Legacy.Gen.u1Present)
3200 {
3201 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3202 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3203 }
3204
3205 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3206 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3207
3208 /* Set the accessed bit before committing the result into SS. */
3209 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3210 {
3211 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3212 if (rcStrict != VINF_SUCCESS)
3213 return rcStrict;
3214 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3215 }
3216
3217 /* Commit SS. */
3218 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3219 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3220 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3221 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
3222 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
3223 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
3225
3226 /* CPL has changed, update IEM before loading rest of segments. */
3227 IEM_SET_CPL(pVCpu, uNewCpl);
3228
3229 /*
3230 * Load the data segments for the new task.
3231 */
3232 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
3233 if (rcStrict != VINF_SUCCESS)
3234 return rcStrict;
3235 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
3236 if (rcStrict != VINF_SUCCESS)
3237 return rcStrict;
3238 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
3239 if (rcStrict != VINF_SUCCESS)
3240 return rcStrict;
3241 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
3242 if (rcStrict != VINF_SUCCESS)
3243 return rcStrict;
3244
3245 /*
3246 * Load the code segment for the new task.
3247 */
3248 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3249 {
3250 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3251 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3252 }
3253
3254 /* Fetch the descriptor. */
3255 IEMSELDESC DescCS;
3256 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
3257 if (rcStrict != VINF_SUCCESS)
3258 {
3259 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3260 return rcStrict;
3261 }
3262
3263 /* CS must be a code segment. */
3264 if ( !DescCS.Legacy.Gen.u1DescType
3265 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3266 {
3267 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3268 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3269 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3270 }
3271
3272 /* For conforming CS, DPL must be less than or equal to the RPL. */
3273 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3274 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3275 {
3276 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3277 DescCS.Legacy.Gen.u2Dpl));
3278 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3279 }
3280
3281 /* For non-conforming CS, DPL must match RPL. */
3282 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3283 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3284 {
3285 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3286 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3287 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3288 }
3289
3290 /* Is it there? */
3291 if (!DescCS.Legacy.Gen.u1Present)
3292 {
3293 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3294 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3295 }
3296
3297 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3298 u64Base = X86DESC_BASE(&DescCS.Legacy);
3299
3300 /* Set the accessed bit before committing the result into CS. */
3301 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3302 {
3303 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3304 if (rcStrict != VINF_SUCCESS)
3305 return rcStrict;
3306 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3307 }
3308
3309 /* Commit CS. */
3310 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3311 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3312 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3313 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3314 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3315 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3316 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3317 }
3318
3319 /* Make sure the CPU mode is correct. */
3320 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3321 if (fExecNew != pVCpu->iem.s.fExec)
3322 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3323 pVCpu->iem.s.fExec = fExecNew;
3324
3325 /** @todo Debug trap. */
3326 if (fIsNewTss386 && fNewDebugTrap)
3327 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3328
3329 /*
3330 * Construct the error code masks based on what caused this task switch.
3331 * See Intel Instruction reference for INT.
3332 */
3333 uint16_t uExt;
3334 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3335 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3336 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3337 uExt = 1;
3338 else
3339 uExt = 0;
3340
3341 /*
3342 * Push any error code on to the new stack.
3343 */
3344 if (fFlags & IEM_XCPT_FLAGS_ERR)
3345 {
3346 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3347 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3348 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3349
3350 /* Check that there is sufficient space on the stack. */
3351 /** @todo Factor out segment limit checking for normal/expand down segments
3352 * into a separate function. */
3353 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3354 {
3355 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3356 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3357 {
3358 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3359 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3360 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3361 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3362 }
3363 }
3364 else
3365 {
3366 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3367 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3368 {
3369 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3370 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3371 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3372 }
3373 }
3374
3375
3376 if (fIsNewTss386)
3377 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3378 else
3379 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3380 if (rcStrict != VINF_SUCCESS)
3381 {
3382 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3383 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3384 return rcStrict;
3385 }
3386 }
3387
3388 /* Check the new EIP against the new CS limit. */
3389 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3390 {
3391 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3392 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3393 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3394 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3395 }
3396
3397 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3398 pVCpu->cpum.GstCtx.ss.Sel));
3399 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3400}
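
/*
 * Note on the selector error codes used above and below (Intel SDM vol. 3, 6.13 "Error Code"):
 *      bit 0       EXT - the event was delivered on behalf of an external event,
 *      bit 1       IDT - the selector index refers to an IDT entry,
 *      bit 2       TI  - GDT(0)/LDT(1), only meaningful when IDT is clear,
 *      bits 3..15  selector/vector index.
 * So, assuming the usual constant values in x86.h, X86_TRAP_ERR_IDT | (0x0d << X86_TRAP_ERR_SEL_SHIFT)
 * yields the familiar error code 0x6a for a bad #GP IDT entry.
 */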
3401
3402
3403/**
3404 * Implements exceptions and interrupts for protected mode.
3405 *
3406 * @returns VBox strict status code.
3407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3408 * @param cbInstr The number of bytes to offset rIP by in the return
3409 * address.
3410 * @param u8Vector The interrupt / exception vector number.
3411 * @param fFlags The flags.
3412 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3413 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3414 */
3415static VBOXSTRICTRC
3416iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3417 uint8_t cbInstr,
3418 uint8_t u8Vector,
3419 uint32_t fFlags,
3420 uint16_t uErr,
3421 uint64_t uCr2) RT_NOEXCEPT
3422{
3423 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3424
3425 /*
3426 * Read the IDT entry.
3427 */
3428 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3429 {
3430 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3431 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3432 }
3433 X86DESC Idte;
3434 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3435 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3436 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3437 {
3438 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3439 return rcStrict;
3440 }
3441 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3442 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3443 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3444 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3445
3446 /*
3447 * Check the descriptor type, DPL and such.
3448 * ASSUMES this is done in the same order as described for call-gate calls.
3449 */
3450 if (Idte.Gate.u1DescType)
3451 {
3452 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3453 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3454 }
3455 bool fTaskGate = false;
3456 uint8_t f32BitGate = true;
3457 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3458 switch (Idte.Gate.u4Type)
3459 {
3460 case X86_SEL_TYPE_SYS_UNDEFINED:
3461 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3462 case X86_SEL_TYPE_SYS_LDT:
3463 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3464 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3465 case X86_SEL_TYPE_SYS_UNDEFINED2:
3466 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3467 case X86_SEL_TYPE_SYS_UNDEFINED3:
3468 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3469 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3470 case X86_SEL_TYPE_SYS_UNDEFINED4:
3471 {
3472 /** @todo check what actually happens when the type is wrong...
3473 * esp. call gates. */
3474 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3475 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3476 }
3477
3478 case X86_SEL_TYPE_SYS_286_INT_GATE:
3479 f32BitGate = false;
3480 RT_FALL_THRU();
3481 case X86_SEL_TYPE_SYS_386_INT_GATE:
3482 fEflToClear |= X86_EFL_IF;
3483 break;
3484
3485 case X86_SEL_TYPE_SYS_TASK_GATE:
3486 fTaskGate = true;
3487#ifndef IEM_IMPLEMENTS_TASKSWITCH
3488 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3489#endif
3490 break;
3491
3492 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3493 f32BitGate = false;
3494 break;
3495 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3496 break;
3497
3498 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3499 }
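    /*
     * To summarize the switch above: interrupt gates additionally clear EFLAGS.IF on dispatch
     * (via fEflToClear), trap gates leave IF alone, the 286 gate variants merely select a
     * 16-bit stack frame through f32BitGate, task gates divert to iemTaskSwitch further down,
     * and every other system descriptor type in an IDT entry raises #GP.
     */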
3500
3501 /* Check DPL against CPL if applicable. */
3502 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3503 {
3504 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3505 {
3506 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3507 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3508 }
3509 }
3510
3511 /* Is it there? */
3512 if (!Idte.Gate.u1Present)
3513 {
3514 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3515 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3516 }
3517
3518 /* Is it a task-gate? */
3519 if (fTaskGate)
3520 {
3521 /*
3522 * Construct the error code masks based on what caused this task switch.
3523 * See Intel Instruction reference for INT.
3524 */
3525 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3526 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3527 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3528 RTSEL SelTss = Idte.Gate.u16Sel;
3529
3530 /*
3531 * Fetch the TSS descriptor in the GDT.
3532 */
3533 IEMSELDESC DescTSS;
3534 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3535 if (rcStrict != VINF_SUCCESS)
3536 {
3537 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3538 VBOXSTRICTRC_VAL(rcStrict)));
3539 return rcStrict;
3540 }
3541
3542 /* The TSS descriptor must be a system segment and be available (not busy). */
3543 if ( DescTSS.Legacy.Gen.u1DescType
3544 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3545 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3546 {
3547 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3548 u8Vector, SelTss, DescTSS.Legacy.au64));
3549 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3550 }
3551
3552 /* The TSS must be present. */
3553 if (!DescTSS.Legacy.Gen.u1Present)
3554 {
3555 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3556 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3557 }
3558
3559 /* Do the actual task switch. */
3560 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3561 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3562 fFlags, uErr, uCr2, SelTss, &DescTSS);
3563 }
3564
3565 /* A null CS is bad. */
3566 RTSEL NewCS = Idte.Gate.u16Sel;
3567 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3568 {
3569 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3570 return iemRaiseGeneralProtectionFault0(pVCpu);
3571 }
3572
3573 /* Fetch the descriptor for the new CS. */
3574 IEMSELDESC DescCS;
3575 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3576 if (rcStrict != VINF_SUCCESS)
3577 {
3578 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3579 return rcStrict;
3580 }
3581
3582 /* Must be a code segment. */
3583 if (!DescCS.Legacy.Gen.u1DescType)
3584 {
3585 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3586 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3587 }
3588 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3589 {
3590 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3591 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3592 }
3593
3594 /* Don't allow lowering the privilege level. */
3595 /** @todo Does the lowering of privileges apply to software interrupts
3596 * only? This has a bearing on the more-privileged or
3597 * same-privilege stack behavior further down. A testcase would
3598 * be nice. */
3599 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3600 {
3601 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3602 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3603 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3604 }
3605
3606 /* Make sure the selector is present. */
3607 if (!DescCS.Legacy.Gen.u1Present)
3608 {
3609 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3610 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3611 }
3612
3613#ifdef LOG_ENABLED
3614 /* If this is a software interrupt, try to decode it if logging is enabled and such. */
3615 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3616 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3617 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3618#endif
3619
3620 /* Check the new EIP against the new CS limit. */
3621 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3622 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3623 ? Idte.Gate.u16OffsetLow
3624 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
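    /*
     * For illustration: a 286 gate only supplies the low 16 bits of the offset, while a 386 gate
     * combines both halves, e.g. u16OffsetLow=0x1234 and u16OffsetHigh=0x0040 give uNewEip=0x00401234.
     */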
3625 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3626 if (uNewEip > cbLimitCS)
3627 {
3628 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3629 u8Vector, uNewEip, cbLimitCS, NewCS));
3630 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3631 }
3632 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3633
3634 /* Calc the flag image to push. */
3635 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3636 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3637 fEfl &= ~X86_EFL_RF;
3638 else
3639 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3640
3641 /* From V8086 mode only go to CPL 0. */
3642 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3643 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3644 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3645 {
3646 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3647 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3648 }
3649
3650 /*
3651 * If the privilege level changes, we need to get a new stack from the TSS.
3652 * This in turns means validating the new SS and ESP...
3653 */
3654 if (uNewCpl != IEM_GET_CPL(pVCpu))
3655 {
3656 RTSEL NewSS;
3657 uint32_t uNewEsp;
3658 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3659 if (rcStrict != VINF_SUCCESS)
3660 return rcStrict;
3661
3662 IEMSELDESC DescSS;
3663 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3664 if (rcStrict != VINF_SUCCESS)
3665 return rcStrict;
3666 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3667 if (!DescSS.Legacy.Gen.u1DefBig)
3668 {
3669 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3670 uNewEsp = (uint16_t)uNewEsp;
3671 }
3672
3673 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3674
3675 /* Check that there is sufficient space for the stack frame. */
3676 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3677 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3678 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3679 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
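            /*
             * For illustration: a 32-bit gate (f32BitGate=1) entered from protected mode needs
             * 12 << 1 = 24 bytes with an error code (ERR, EIP, CS, EFLAGS, ESP, SS as dwords)
             * or 10 << 1 = 20 bytes without one; from V8086 mode the frame also holds ES, DS,
             * FS and GS, i.e. 20/18 << 1 = 40/36 bytes.  A 16-bit gate shifts by 0, halving these.
             */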
3680
3681 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3682 {
3683 if ( uNewEsp - 1 > cbLimitSS
3684 || uNewEsp < cbStackFrame)
3685 {
3686 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3687 u8Vector, NewSS, uNewEsp, cbStackFrame));
3688 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3689 }
3690 }
3691 else
3692 {
3693 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3694 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3695 {
3696 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3697 u8Vector, NewSS, uNewEsp, cbStackFrame));
3698 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3699 }
3700 }
3701
3702 /*
3703 * Start making changes.
3704 */
3705
3706 /* Set the new CPL so that stack accesses use it. */
3707 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3708 IEM_SET_CPL(pVCpu, uNewCpl);
3709
3710 /* Create the stack frame. */
3711 uint8_t bUnmapInfoStackFrame;
3712 RTPTRUNION uStackFrame;
3713 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3714 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3715 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3716 if (rcStrict != VINF_SUCCESS)
3717 return rcStrict;
3718 if (f32BitGate)
3719 {
3720 if (fFlags & IEM_XCPT_FLAGS_ERR)
3721 *uStackFrame.pu32++ = uErr;
3722 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3723 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3724 uStackFrame.pu32[2] = fEfl;
3725 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3726 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3727 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3728 if (fEfl & X86_EFL_VM)
3729 {
3730 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3731 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3732 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3733 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3734 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3735 }
3736 }
3737 else
3738 {
3739 if (fFlags & IEM_XCPT_FLAGS_ERR)
3740 *uStackFrame.pu16++ = uErr;
3741 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3742 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3743 uStackFrame.pu16[2] = fEfl;
3744 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3745 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3746 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3747 if (fEfl & X86_EFL_VM)
3748 {
3749 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3750 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3751 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3752 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3753 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3754 }
3755 }
3756 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3757 if (rcStrict != VINF_SUCCESS)
3758 return rcStrict;
3759
3760 /* Mark the selectors 'accessed' (hope this is the correct time). */
3761 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3762 * after pushing the stack frame? (Write protect the gdt + stack to
3763 * find out.) */
3764 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3765 {
3766 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3767 if (rcStrict != VINF_SUCCESS)
3768 return rcStrict;
3769 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3770 }
3771
3772 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3773 {
3774 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3775 if (rcStrict != VINF_SUCCESS)
3776 return rcStrict;
3777 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3778 }
3779
3780 /*
3781 * Start committing the register changes (joins with the DPL=CPL branch).
3782 */
3783 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3784 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3785 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3786 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3787 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3788 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3789 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3790 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3791 * SP is loaded).
3792 * Need to check the other combinations too:
3793 * - 16-bit TSS, 32-bit handler
3794 * - 32-bit TSS, 16-bit handler */
3795 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3796 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3797 else
3798 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3799
3800 if (fEfl & X86_EFL_VM)
3801 {
3802 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3803 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3804 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3805 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3806 }
3807 }
3808 /*
3809 * Same privilege, no stack change and smaller stack frame.
3810 */
3811 else
3812 {
3813 uint64_t uNewRsp;
3814 uint8_t bUnmapInfoStackFrame;
3815 RTPTRUNION uStackFrame;
3816 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
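        /*
         * For illustration: with a 32-bit gate this is 8/6 << 1 = 16 or 12 bytes (optional ERR,
         * then EIP, CS, EFLAGS as dwords); a 16-bit gate shifts by 0, giving an 8 or 6 byte frame
         * of words.  No SS:ESP is pushed here since the privilege level does not change.
         */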
3817 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3818 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3819 if (rcStrict != VINF_SUCCESS)
3820 return rcStrict;
3821
3822 if (f32BitGate)
3823 {
3824 if (fFlags & IEM_XCPT_FLAGS_ERR)
3825 *uStackFrame.pu32++ = uErr;
3826 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3827 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3828 uStackFrame.pu32[2] = fEfl;
3829 }
3830 else
3831 {
3832 if (fFlags & IEM_XCPT_FLAGS_ERR)
3833 *uStackFrame.pu16++ = uErr;
3834 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3835 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3836 uStackFrame.pu16[2] = fEfl;
3837 }
3838 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3839 if (rcStrict != VINF_SUCCESS)
3840 return rcStrict;
3841
3842 /* Mark the CS selector as 'accessed'. */
3843 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3844 {
3845 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3846 if (rcStrict != VINF_SUCCESS)
3847 return rcStrict;
3848 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3849 }
3850
3851 /*
3852 * Start committing the register changes (joins with the other branch).
3853 */
3854 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3855 }
3856
3857 /* ... register committing continues. */
3858 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3859 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3860 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3861 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3862 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3863 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3864
3865 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3866 fEfl &= ~fEflToClear;
3867 IEMMISC_SET_EFL(pVCpu, fEfl);
3868
3869 if (fFlags & IEM_XCPT_FLAGS_CR2)
3870 pVCpu->cpum.GstCtx.cr2 = uCr2;
3871
3872 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3873 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3874
3875 /* Make sure the execution flags are correct. */
3876 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3877 if (fExecNew != pVCpu->iem.s.fExec)
3878 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3879 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3880 pVCpu->iem.s.fExec = fExecNew;
3881 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3882
3883 /*
3884 * Deal with debug events that follow the exception and clear inhibit flags.
3885 */
3886 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3887 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
3888 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3889 else
3890 {
3891 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
3892 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
3893 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3894 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
3895 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
3896 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3897 return iemRaiseDebugException(pVCpu);
3898 }
3899
3900 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3901}
3902
3903
3904/**
3905 * Implements exceptions and interrupts for long mode.
3906 *
3907 * @returns VBox strict status code.
3908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3909 * @param cbInstr The number of bytes to offset rIP by in the return
3910 * address.
3911 * @param u8Vector The interrupt / exception vector number.
3912 * @param fFlags The flags.
3913 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3914 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3915 */
3916static VBOXSTRICTRC
3917iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3918 uint8_t cbInstr,
3919 uint8_t u8Vector,
3920 uint32_t fFlags,
3921 uint16_t uErr,
3922 uint64_t uCr2) RT_NOEXCEPT
3923{
3924 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3925
3926 /*
3927 * Read the IDT entry.
3928 */
3929 uint16_t offIdt = (uint16_t)u8Vector << 4;
3930 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3931 {
3932 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3933 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3934 }
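    /*
     * For illustration: long mode IDT entries are 16 bytes, so e.g. vector 0x0e (#PF) lives at
     * pIdt + 0xe0, and the bounds check above requires idtr.cbIdt to be at least 0xe7.
     */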
3935 X86DESC64 Idte;
3936#ifdef _MSC_VER /* Shut up silly compiler warning. */
3937 Idte.au64[0] = 0;
3938 Idte.au64[1] = 0;
3939#endif
3940 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3941 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3942 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3943 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3944 {
3945 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3946 return rcStrict;
3947 }
3948 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3949 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3950 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3951
3952 /*
3953 * Check the descriptor type, DPL and such.
3954 * ASSUMES this is done in the same order as described for call-gate calls.
3955 */
3956 if (Idte.Gate.u1DescType)
3957 {
3958 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3959 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3960 }
3961 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3962 switch (Idte.Gate.u4Type)
3963 {
3964 case AMD64_SEL_TYPE_SYS_INT_GATE:
3965 fEflToClear |= X86_EFL_IF;
3966 break;
3967 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3968 break;
3969
3970 default:
3971 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3972 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3973 }
3974
3975 /* Check DPL against CPL if applicable. */
3976 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3977 {
3978 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3979 {
3980 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3981 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3982 }
3983 }
3984
3985 /* Is it there? */
3986 if (!Idte.Gate.u1Present)
3987 {
3988 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3989 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3990 }
3991
3992 /* A null CS is bad. */
3993 RTSEL NewCS = Idte.Gate.u16Sel;
3994 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3995 {
3996 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3997 return iemRaiseGeneralProtectionFault0(pVCpu);
3998 }
3999
4000 /* Fetch the descriptor for the new CS. */
4001 IEMSELDESC DescCS;
4002 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4003 if (rcStrict != VINF_SUCCESS)
4004 {
4005 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4006 return rcStrict;
4007 }
4008
4009 /* Must be a 64-bit code segment. */
4010 if (!DescCS.Long.Gen.u1DescType)
4011 {
4012 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4013 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4014 }
4015 if ( !DescCS.Long.Gen.u1Long
4016 || DescCS.Long.Gen.u1DefBig
4017 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4018 {
4019 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4020 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4021 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4022 }
4023
4024 /* Don't allow lowering the privilege level. For non-conforming CS
4025 selectors, the CS.DPL sets the privilege level the trap/interrupt
4026 handler runs at. For conforming CS selectors, the CPL remains
4027 unchanged, but the CS.DPL must be <= CPL. */
4028 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4029 * when CPU in Ring-0. Result \#GP? */
4030 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
4031 {
4032 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4033 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
4034 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4035 }
4036
4037
4038 /* Make sure the selector is present. */
4039 if (!DescCS.Legacy.Gen.u1Present)
4040 {
4041 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4042 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4043 }
4044
4045 /* Check that the new RIP is canonical. */
4046 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4047 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4048 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4049 if (!IEM_IS_CANONICAL(uNewRip))
4050 {
4051 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4052 return iemRaiseGeneralProtectionFault0(pVCpu);
4053 }
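    /*
     * For illustration: the gate offset is assembled from three fields, e.g. u16OffsetLow=0x1000,
     * u16OffsetHigh=0xffff and u32OffsetTop=0xffffffff give uNewRip=0xffffffffffff1000, which the
     * check above accepts as canonical.
     */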
4054
4055 /*
4056 * If the privilege level changes or if the IST isn't zero, we need to get
4057 * a new stack from the TSS.
4058 */
4059 uint64_t uNewRsp;
4060 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4061 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
4062 if ( uNewCpl != IEM_GET_CPL(pVCpu)
4063 || Idte.Gate.u3IST != 0)
4064 {
4065 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4066 if (rcStrict != VINF_SUCCESS)
4067 return rcStrict;
4068 }
4069 else
4070 uNewRsp = pVCpu->cpum.GstCtx.rsp;
4071 uNewRsp &= ~(uint64_t)0xf;
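    /*
     * For illustration: the AND above forces 16-byte stack alignment before the frame is pushed,
     * e.g. an incoming RSP of 0x7ffe8ff8 becomes 0x7ffe8ff0.
     */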
4072
4073 /*
4074 * Calc the flag image to push.
4075 */
4076 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4077 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4078 fEfl &= ~X86_EFL_RF;
4079 else
4080 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4081
4082 /*
4083 * Start making changes.
4084 */
4085 /* Set the new CPL so that stack accesses use it. */
4086 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
4087 IEM_SET_CPL(pVCpu, uNewCpl);
4088/** @todo Setting CPL this early seems wrong as it would affect any errors we
4089 * raise while accessing the stack and (?) GDT/LDT... */
4090
4091 /* Create the stack frame. */
4092 uint8_t bUnmapInfoStackFrame;
4093 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
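    /*
     * For illustration: the 64-bit frame always holds RIP, CS, RFLAGS, RSP and SS (5 qwords,
     * 40 bytes), plus one extra qword when an error code is pushed (48 bytes).
     */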
4094 RTPTRUNION uStackFrame;
4095 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
4096 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
4097 if (rcStrict != VINF_SUCCESS)
4098 return rcStrict;
4099
4100 if (fFlags & IEM_XCPT_FLAGS_ERR)
4101 *uStackFrame.pu64++ = uErr;
4102 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
4103 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4104 uStackFrame.pu64[2] = fEfl;
4105 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
4106 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
4107 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4108 if (rcStrict != VINF_SUCCESS)
4109 return rcStrict;
4110
4111 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
4112 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4113 * after pushing the stack frame? (Write protect the gdt + stack to
4114 * find out.) */
4115 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4116 {
4117 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4118 if (rcStrict != VINF_SUCCESS)
4119 return rcStrict;
4120 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4121 }
4122
4123 /*
4124 * Start committing the register changes.
4125 */
4126 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
4127 * hidden registers when interrupting 32-bit or 16-bit code! */
4128 if (uNewCpl != uOldCpl)
4129 {
4130 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
4131 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
4132 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4133 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4134 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4135 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4136 }
4137 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
4138 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4139 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4140 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4141 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4142 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4143 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4144 pVCpu->cpum.GstCtx.rip = uNewRip;
4145
4146 fEfl &= ~fEflToClear;
4147 IEMMISC_SET_EFL(pVCpu, fEfl);
4148
4149 if (fFlags & IEM_XCPT_FLAGS_CR2)
4150 pVCpu->cpum.GstCtx.cr2 = uCr2;
4151
4152 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4153 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4154
4155 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
4156
4157 /*
4158 * Deal with debug events that follow the exception and clear inhibit flags.
4159 */
4160 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4161 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4162 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4163 else
4164 {
4165 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
4166 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4167 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4168 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4169 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4170 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4171 return iemRaiseDebugException(pVCpu);
4172 }
4173
4174 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4175}
4176
4177
4178/**
4179 * Implements exceptions and interrupts.
4180 *
4181 * All exceptions and interrupts go through this function!
4182 *
4183 * @returns VBox strict status code.
4184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4185 * @param cbInstr The number of bytes to offset rIP by in the return
4186 * address.
4187 * @param u8Vector The interrupt / exception vector number.
4188 * @param fFlags The flags.
4189 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4190 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4191 */
4192VBOXSTRICTRC
4193iemRaiseXcptOrInt(PVMCPUCC pVCpu,
4194 uint8_t cbInstr,
4195 uint8_t u8Vector,
4196 uint32_t fFlags,
4197 uint16_t uErr,
4198 uint64_t uCr2) RT_NOEXCEPT
4199{
4200 /*
4201 * Get all the state that we might need here.
4202 */
4203 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4204 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4205
4206#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4207 /*
4208 * Flush prefetch buffer
4209 */
4210 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4211#endif
4212
4213 /*
4214 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4215 */
4216 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
4217 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
4218 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
4219 | IEM_XCPT_FLAGS_BP_INSTR
4220 | IEM_XCPT_FLAGS_ICEBP_INSTR
4221 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4222 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
4223 {
4224 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4225 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4226 u8Vector = X86_XCPT_GP;
4227 uErr = 0;
4228 }
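    /*
     * For illustration: a software INT (e.g. INT 21h) executed in V8086 mode with IOPL < 3 is
     * converted right here into #GP(0) instead of being dispatched through the IDT; external
     * interrupts, INT3, INTO and ICEBP are not affected by this check.
     */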
4229
4230 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
4231#ifdef DBGFTRACE_ENABLED
4232 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4233 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4234 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
4235#endif
4236
4237 /*
4238 * Check if DBGF wants to intercept the exception.
4239 */
4240 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
4241 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
4242 { /* likely */ }
4243 else
4244 {
4245 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
4246 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
4247 if (rcStrict != VINF_SUCCESS)
4248 return rcStrict;
4249 }
4250
4251 /*
4252 * Evaluate whether NMI blocking should be in effect.
4253 * Normally, NMI blocking is in effect whenever we inject an NMI.
4254 */
4255 bool fBlockNmi = u8Vector == X86_XCPT_NMI
4256 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
4257
4258#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4259 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4260 {
4261 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
4262 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4263 return rcStrict0;
4264
4265 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
4266 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
4267 {
4268 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
4269 fBlockNmi = false;
4270 }
4271 }
4272#endif
4273
4274#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4275 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
4276 {
4277 /*
4278 * If the event is being injected as part of VMRUN, it isn't subject to event
4279 * intercepts in the nested-guest. However, secondary exceptions that occur
4280 * during injection of any event -are- subject to exception intercepts.
4281 *
4282 * See AMD spec. 15.20 "Event Injection".
4283 */
4284 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
4285 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
4286 else
4287 {
4288 /*
4289 * Check and handle if the event being raised is intercepted.
4290 */
4291 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4292 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
4293 return rcStrict0;
4294 }
4295 }
4296#endif
4297
4298 /*
4299 * Set NMI blocking if necessary.
4300 */
4301 if (fBlockNmi)
4302 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
4303
4304 /*
4305 * Do recursion accounting.
4306 */
4307 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4308 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4309 if (pVCpu->iem.s.cXcptRecursions == 0)
4310 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4311 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
4312 else
4313 {
4314 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4315 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
4316 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4317
4318 if (pVCpu->iem.s.cXcptRecursions >= 4)
4319 {
4320#ifdef DEBUG_bird
4321 AssertFailed();
4322#endif
4323 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4324 }
4325
4326 /*
4327 * Evaluate the sequence of recurring events.
4328 */
4329 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4330 NULL /* pXcptRaiseInfo */);
4331 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4332 { /* likely */ }
4333 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4334 {
4335 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4336 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4337 u8Vector = X86_XCPT_DF;
4338 uErr = 0;
4339#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4340 /* VMX nested-guest #DF intercept needs to be checked here. */
4341 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4342 {
4343 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4344 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4345 return rcStrict0;
4346 }
4347#endif
4348 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4349 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4350 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4351 }
4352 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4353 {
4354 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4355 return iemInitiateCpuShutdown(pVCpu);
4356 }
4357 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4358 {
4359 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4360 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4361 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4362 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4363 return VERR_EM_GUEST_CPU_HANG;
4364 }
4365 else
4366 {
4367 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4368 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4369 return VERR_IEM_IPE_9;
4370 }
4371
4372 /*
4373 * The 'EXT' bit is set when an exception occurs during delivery of an external
4374 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4375 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by software
4376 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
4377 *
4378 * [1] - Intel spec. 6.13 "Error Code"
4379 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4380 * [3] - Intel Instruction reference for INT n.
4381 */
4382 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4383 && (fFlags & IEM_XCPT_FLAGS_ERR)
4384 && u8Vector != X86_XCPT_PF
4385 && u8Vector != X86_XCPT_DF)
4386 {
4387 uErr |= X86_TRAP_ERR_EXTERNAL;
4388 }
4389 }
4390
4391 pVCpu->iem.s.cXcptRecursions++;
4392 pVCpu->iem.s.uCurXcpt = u8Vector;
4393 pVCpu->iem.s.fCurXcpt = fFlags;
4394 pVCpu->iem.s.uCurXcptErr = uErr;
4395 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4396
4397 /*
4398 * Extensive logging.
4399 */
4400#if defined(LOG_ENABLED) && defined(IN_RING3)
4401 if (LogIs3Enabled())
4402 {
4403 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4404 char szRegs[4096];
4405 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4406 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4407 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4408 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4409 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4410 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4411 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4412 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4413 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4414 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4415 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4416 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4417 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4418 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4419 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4420 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4421 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4422 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4423 " efer=%016VR{efer}\n"
4424 " pat=%016VR{pat}\n"
4425 " sf_mask=%016VR{sf_mask}\n"
4426 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4427 " lstar=%016VR{lstar}\n"
4428 " star=%016VR{star} cstar=%016VR{cstar}\n"
4429 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4430 );
4431
4432 char szInstr[256];
4433 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4434 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4435 szInstr, sizeof(szInstr), NULL);
4436 Log3(("%s%s\n", szRegs, szInstr));
4437 }
4438#endif /* LOG_ENABLED */
4439
4440 /*
4441 * Stats.
4442 */
4443 uint64_t const uTimestamp = ASMReadTSC();
4444 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4445 {
4446 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4447 EMHistoryAddExit(pVCpu,
4448 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4449 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4450 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4451 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4452 IEMTLBTRACE_IRQ(pVCpu, u8Vector, fFlags, pVCpu->cpum.GstCtx.rflags.uBoth);
4453 }
4454 else
4455 {
4456 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4457 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4458 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4459 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4460 if (fFlags & IEM_XCPT_FLAGS_ERR)
4461 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4462 if (fFlags & IEM_XCPT_FLAGS_CR2)
4463 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4464 IEMTLBTRACE_XCPT(pVCpu, u8Vector, fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0, fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0, fFlags);
4465 }
4466
4467 /*
4468     * Hack alert! Convert incoming debug events to silent ones on Intel.
4469 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4470 */
4471 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4472 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4473 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4474 { /* ignore */ }
4475 else
4476 {
4477 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
4478 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
4479 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4480 | CPUMCTX_DBG_HIT_DRX_SILENT;
4481 }
4482
4483 /*
4484     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4485 * to ensure that a stale TLB or paging cache entry will only cause one
4486 * spurious #PF.
4487 */
4488 if ( u8Vector == X86_XCPT_PF
4489 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4490 IEMTlbInvalidatePage(pVCpu, uCr2);
4491
4492 /*
4493 * Call the mode specific worker function.
4494 */
4495 VBOXSTRICTRC rcStrict;
4496 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4497 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4498 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4499 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4500 else
4501 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4502
4503 /* Flush the prefetch buffer. */
4504 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4505
4506 /*
4507 * Unwind.
4508 */
4509 pVCpu->iem.s.cXcptRecursions--;
4510 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4511 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4512 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4513 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4514 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4515 return rcStrict;
4516}
4517
4518#ifdef IEM_WITH_SETJMP
4519/**
4520 * See iemRaiseXcptOrInt. Will not return.
4521 */
4522DECL_NO_RETURN(void)
4523iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4524 uint8_t cbInstr,
4525 uint8_t u8Vector,
4526 uint32_t fFlags,
4527 uint16_t uErr,
4528 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4529{
4530 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4531 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4532}
4533#endif
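
/*
 * Minimal usage sketch (illustrative only, kept out of the build): how a caller
 * hands a CPU exception versus a software interrupt to iemRaiseXcptOrInt. The
 * vector and instruction length values below are made-up examples.
 */
#if 0
static VBOXSTRICTRC iemExampleRaiseUsage(PVMCPUCC pVCpu, bool fSoftInt)
{
    if (!fSoftInt)
        /* A #GP(0): CPU exception with a zero error code and no CR2. */
        return iemRaiseXcptOrInt(pVCpu, 0 /*cbInstr*/, X86_XCPT_GP,
                                 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0 /*uErr*/, 0 /*uCr2*/);
    /* A software interrupt raised by a 2-byte INT 21h instruction. */
    return iemRaiseXcptOrInt(pVCpu, 2 /*cbInstr*/, 0x21 /*u8Vector*/, IEM_XCPT_FLAGS_T_SOFT_INT, 0 /*uErr*/, 0 /*uCr2*/);
}
#endif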
4534
4535
4536/** \#DE - 00. */
4537VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4538{
4539 if (GCMIsInterceptingXcptDE(pVCpu))
4540 {
4541 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4542 if (rc == VINF_SUCCESS)
4543 {
4544 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4545             return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
4546 }
4547 }
4548 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4549}
4550
4551
4552#ifdef IEM_WITH_SETJMP
4553/** \#DE - 00. */
4554DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4555{
4556 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4557}
4558#endif
4559
4560
4561/** \#DB - 01.
4562 * @note This automatically clears DR7.GD. */
4563VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4564{
4565 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4566 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4567 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4568}
4569
4570
4571/** \#BR - 05. */
4572VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4573{
4574 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4575}
4576
4577
4578/** \#UD - 06. */
4579VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4580{
4581 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4582}
4583
4584
4585#ifdef IEM_WITH_SETJMP
4586/** \#UD - 06. */
4587DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4588{
4589 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4590}
4591#endif
4592
4593
4594/** \#NM - 07. */
4595VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4596{
4597 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4598}
4599
4600
4601#ifdef IEM_WITH_SETJMP
4602/** \#NM - 07. */
4603DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4604{
4605 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4606}
4607#endif
4608
4609
4610/** \#TS(err) - 0a. */
4611VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4612{
4613 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4614}
4615
4616
4617/** \#TS(tr) - 0a. */
4618VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4619{
4620 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4621 pVCpu->cpum.GstCtx.tr.Sel, 0);
4622}
4623
4624
4625/** \#TS(0) - 0a. */
4626VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4627{
4628 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4629 0, 0);
4630}
4631
4632
4633/** \#TS(sel) - 0a. */
4634VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4635{
4636 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4637 uSel & X86_SEL_MASK_OFF_RPL, 0);
4638}
4639
4640
4641/** \#NP(err) - 0b. */
4642VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4643{
4644 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4645}
4646
4647
4648/** \#NP(sel) - 0b. */
4649VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4650{
4651 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4652 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4653 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4654 uSel & ~X86_SEL_RPL, 0);
4655}
4656
4657
4658/** \#SS(seg) - 0c. */
4659VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4660{
4661 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4662 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4663 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4664 uSel & ~X86_SEL_RPL, 0);
4665}
4666
4667
4668/** \#SS(err) - 0c. */
4669VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4670{
4671 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4672 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4673 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4674}
4675
4676
4677/** \#GP(n) - 0d. */
4678VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4679{
4680 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4681 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4682}
4683
4684
4685/** \#GP(0) - 0d. */
4686VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4687{
4688 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4689 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4690}
4691
4692#ifdef IEM_WITH_SETJMP
4693/** \#GP(0) - 0d. */
4694DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4695{
4696 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4697 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4698}
4699#endif
4700
4701
4702/** \#GP(sel) - 0d. */
4703VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4704{
4705 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4706 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4707 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4708 Sel & ~X86_SEL_RPL, 0);
4709}
4710
4711
4712/** \#GP(0) - 0d. */
4713VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4714{
4715 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4716 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4717}
4718
4719
4720/** \#GP(sel) - 0d. */
4721VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4722{
4723 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4724 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4725 NOREF(iSegReg); NOREF(fAccess);
4726 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4727 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4728}
4729
4730#ifdef IEM_WITH_SETJMP
4731/** \#GP(sel) - 0d, longjmp. */
4732DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4733{
4734 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4735 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4736 NOREF(iSegReg); NOREF(fAccess);
4737 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4738 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4739}
4740#endif
4741
4742/** \#GP(sel) - 0d. */
4743VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4744{
4745 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4746 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4747 NOREF(Sel);
4748 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4749}
4750
4751#ifdef IEM_WITH_SETJMP
4752/** \#GP(sel) - 0d, longjmp. */
4753DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4754{
4755 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4756 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4757 NOREF(Sel);
4758 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4759}
4760#endif
4761
4762
4763/** \#GP(sel) - 0d. */
4764VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4765{
4766 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4767 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4768 NOREF(iSegReg); NOREF(fAccess);
4769 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4770}
4771
4772#ifdef IEM_WITH_SETJMP
4773/** \#GP(sel) - 0d, longjmp. */
4774DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4775{
4776 NOREF(iSegReg); NOREF(fAccess);
4777 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4778}
4779#endif
4780
4781
4782/** \#PF(n) - 0e. */
4783VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4784{
4785 uint16_t uErr;
4786 switch (rc)
4787 {
4788 case VERR_PAGE_NOT_PRESENT:
4789 case VERR_PAGE_TABLE_NOT_PRESENT:
4790 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4791 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4792 uErr = 0;
4793 break;
4794
4795 case VERR_RESERVED_PAGE_TABLE_BITS:
4796 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
4797 break;
4798
4799 default:
4800 AssertMsgFailed(("%Rrc\n", rc));
4801 RT_FALL_THRU();
4802 case VERR_ACCESS_DENIED:
4803 uErr = X86_TRAP_PF_P;
4804 break;
4805 }
4806
4807 if (IEM_GET_CPL(pVCpu) == 3)
4808 uErr |= X86_TRAP_PF_US;
4809
4810 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4811 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4812 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4813 uErr |= X86_TRAP_PF_ID;
4814
4815#if 0 /* This is so much non-sense, really. Why was it done like that? */
4816 /* Note! RW access callers reporting a WRITE protection fault, will clear
4817 the READ flag before calling. So, read-modify-write accesses (RW)
4818 can safely be reported as READ faults. */
4819 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4820 uErr |= X86_TRAP_PF_RW;
4821#else
4822 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4823 {
4824 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4825 /// (regardless of outcome of the comparison in the latter case).
4826 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4827 uErr |= X86_TRAP_PF_RW;
4828 }
4829#endif
4830
4831 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4832 of the memory operand rather than at the start of it. (Not sure what
4833       happens if it crosses a page boundary.) The current heuristic for
4834       this is to report the #PF for the last byte if the access is more than
4835 64 bytes. This is probably not correct, but we can work that out later,
4836 main objective now is to get FXSAVE to work like for real hardware and
4837 make bs3-cpu-basic2 work. */
4838 if (cbAccess <= 64)
4839    { /* likely */ }
4840 else
4841 GCPtrWhere += cbAccess - 1;
4842
4843 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4844 uErr, GCPtrWhere);
4845}
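
/*
 * Worked example of the error code composition above (informal): a ring-3 write
 * to a present, read-only page yields uErr = X86_TRAP_PF_P | X86_TRAP_PF_US
 * | X86_TRAP_PF_RW = 7, while a supervisor instruction fetch from a not-present
 * page with PAE+NXE active yields just X86_TRAP_PF_ID.
 */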
4846
4847#ifdef IEM_WITH_SETJMP
4848/** \#PF(n) - 0e, longjmp. */
4849DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4850 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4851{
4852 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4853}
4854#endif
4855
4856
4857/** \#MF(0) - 10. */
4858VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4859{
4860 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4861 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4862
4863 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4864 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4865 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4866}
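
/*
 * Background note (informal): with CR0.NE clear the x87 error is signalled the
 * legacy PC way, i.e. via FERR# and IRQ 13 through the interrupt controller,
 * which is what the PDMIsaSetIrq call above emulates; with CR0.NE set the fault
 * is delivered as a regular #MF exception instead.
 */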
4867
4868#ifdef IEM_WITH_SETJMP
4869/** \#MF(0) - 10, longjmp. */
4870DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4871{
4872 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4873}
4874#endif
4875
4876
4877/** \#AC(0) - 11. */
4878VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4879{
4880 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4881}
4882
4883#ifdef IEM_WITH_SETJMP
4884/** \#AC(0) - 11, longjmp. */
4885DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4886{
4887 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4888}
4889#endif
4890
4891
4892/** \#XF(0)/\#XM(0) - 19. */
4893VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4894{
4895 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4896}
4897
4898
4899#ifdef IEM_WITH_SETJMP
4900/** \#XF(0)/\#XM(0) - 19, longjmp. */
4901DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4902{
4903 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4904}
4905#endif
4906
4907
4908/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4909IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4910{
4911 NOREF(cbInstr);
4912 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4913}
4914
4915
4916/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4917IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4918{
4919 NOREF(cbInstr);
4920 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4921}
4922
4923
4924/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4925IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4926{
4927 NOREF(cbInstr);
4928 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4929}
4930
4931
4932/** @} */
4933
4934/** @name Common opcode decoders.
4935 * @{
4936 */
4937//#include <iprt/mem.h>
4938
4939/**
4940 * Used to add extra details about a stub case.
4941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4942 */
4943void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4944{
4945#if defined(LOG_ENABLED) && defined(IN_RING3)
4946 PVM pVM = pVCpu->CTX_SUFF(pVM);
4947 char szRegs[4096];
4948 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4949 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4950 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4951 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4952 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4953 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4954 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4955 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4956 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4957 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4958 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4959 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4960 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4961 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4962 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4963 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4964 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4965 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4966 " efer=%016VR{efer}\n"
4967 " pat=%016VR{pat}\n"
4968 " sf_mask=%016VR{sf_mask}\n"
4969 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4970 " lstar=%016VR{lstar}\n"
4971 " star=%016VR{star} cstar=%016VR{cstar}\n"
4972 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4973 );
4974
4975 char szInstr[256];
4976 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4977 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4978 szInstr, sizeof(szInstr), NULL);
4979
4980 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4981#else
4982    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4983#endif
4984}
4985
4986/** @} */
4987
4988
4989
4990/** @name Register Access.
4991 * @{
4992 */
4993
4994/**
4995 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4996 *
4997 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4998 * segment limit.
4999 *
5000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5001 * @param cbInstr Instruction size.
5002 * @param offNextInstr The offset of the next instruction.
5003 * @param enmEffOpSize Effective operand size.
5004 */
5005VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
5006 IEMMODE enmEffOpSize) RT_NOEXCEPT
5007{
5008 switch (enmEffOpSize)
5009 {
5010 case IEMMODE_16BIT:
5011 {
5012 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
5013 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5014 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
5015 pVCpu->cpum.GstCtx.rip = uNewIp;
5016 else
5017 return iemRaiseGeneralProtectionFault0(pVCpu);
5018 break;
5019 }
5020
5021 case IEMMODE_32BIT:
5022 {
5023 Assert(!IEM_IS_64BIT_CODE(pVCpu));
5024 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
5025
5026 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
5027 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5028 pVCpu->cpum.GstCtx.rip = uNewEip;
5029 else
5030 return iemRaiseGeneralProtectionFault0(pVCpu);
5031 break;
5032 }
5033
5034 case IEMMODE_64BIT:
5035 {
5036 Assert(IEM_IS_64BIT_CODE(pVCpu));
5037
5038 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5039 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5040 pVCpu->cpum.GstCtx.rip = uNewRip;
5041 else
5042 return iemRaiseGeneralProtectionFault0(pVCpu);
5043 break;
5044 }
5045
5046 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5047 }
5048
5049#ifndef IEM_WITH_CODE_TLB
5050 /* Flush the prefetch buffer. */
5051 pVCpu->iem.s.cbOpcode = cbInstr;
5052#endif
5053
5054 /*
5055 * Clear RF and finish the instruction (maybe raise #DB).
5056 */
5057 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5058}
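
/*
 * Note on the 16-bit case above (informal example): the addition is performed in
 * a uint16_t, so IP wraps at 64KiB. With ip=0xfffd, cbInstr=2 and offNextInstr=+1
 * the new IP becomes 0x0000, which is then checked against the CS limit as usual.
 */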
5059
5060
5061/**
5062 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5063 *
5064 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5065 * segment limit.
5066 *
5067 * @returns Strict VBox status code.
5068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5069 * @param cbInstr Instruction size.
5070 * @param offNextInstr The offset of the next instruction.
5071 */
5072VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
5073{
5074 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5075
5076 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
5077 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5078 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
5079 pVCpu->cpum.GstCtx.rip = uNewIp;
5080 else
5081 return iemRaiseGeneralProtectionFault0(pVCpu);
5082
5083#ifndef IEM_WITH_CODE_TLB
5084 /* Flush the prefetch buffer. */
5085 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5086#endif
5087
5088 /*
5089 * Clear RF and finish the instruction (maybe raise #DB).
5090 */
5091 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5092}
5093
5094
5095/**
5096 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5097 *
5098 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5099 * segment limit.
5100 *
5101 * @returns Strict VBox status code.
5102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5103 * @param cbInstr Instruction size.
5104 * @param offNextInstr The offset of the next instruction.
5105 * @param enmEffOpSize Effective operand size.
5106 */
5107VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5108 IEMMODE enmEffOpSize) RT_NOEXCEPT
5109{
5110 if (enmEffOpSize == IEMMODE_32BIT)
5111 {
5112 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
5113
5114 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
5115 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5116 pVCpu->cpum.GstCtx.rip = uNewEip;
5117 else
5118 return iemRaiseGeneralProtectionFault0(pVCpu);
5119 }
5120 else
5121 {
5122 Assert(enmEffOpSize == IEMMODE_64BIT);
5123
5124 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5125 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5126 pVCpu->cpum.GstCtx.rip = uNewRip;
5127 else
5128 return iemRaiseGeneralProtectionFault0(pVCpu);
5129 }
5130
5131#ifndef IEM_WITH_CODE_TLB
5132 /* Flush the prefetch buffer. */
5133 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5134#endif
5135
5136 /*
5137 * Clear RF and finish the instruction (maybe raise #DB).
5138 */
5139 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5140}
5141
5142/** @} */
5143
5144
5145/** @name FPU access and helpers.
5146 *
5147 * @{
5148 */
5149
5150/**
5151 * Updates the x87.DS and FPUDP registers.
5152 *
5153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5154 * @param pFpuCtx The FPU context.
5155 * @param iEffSeg The effective segment register.
5156 * @param GCPtrEff The effective address relative to @a iEffSeg.
5157 */
5158DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5159{
5160 RTSEL sel;
5161 switch (iEffSeg)
5162 {
5163 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
5164 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
5165 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
5166 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
5167 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
5168 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
5169 default:
5170 AssertMsgFailed(("%d\n", iEffSeg));
5171 sel = pVCpu->cpum.GstCtx.ds.Sel;
5172 }
5173    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5174 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5175 {
5176 pFpuCtx->DS = 0;
5177 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
5178 }
5179 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
5180 {
5181 pFpuCtx->DS = sel;
5182 pFpuCtx->FPUDP = GCPtrEff;
5183 }
5184 else
5185 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
5186}
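
/*
 * Informal example for the real/V86-mode branch above: with DS=0x1234 and an
 * effective offset of 0x0010, the DS field is cleared and FPUDP receives the
 * linear address 0x1234 * 16 + 0x10 = 0x12350.
 */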
5187
5188
5189/**
5190 * Rotates the stack registers in the push direction.
5191 *
5192 * @param pFpuCtx The FPU context.
5193 * @remarks This is a complete waste of time, but fxsave stores the registers in
5194 * stack order.
5195 */
5196DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5197{
5198 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5199 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5200 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5201 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5202 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5203 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5204 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5205 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5206 pFpuCtx->aRegs[0].r80 = r80Tmp;
5207}
5208
5209
5210/**
5211 * Rotates the stack registers in the pop direction.
5212 *
5213 * @param pFpuCtx The FPU context.
5214 * @remarks This is a complete waste of time, but fxsave stores the registers in
5215 * stack order.
5216 */
5217DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5218{
5219 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5220 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5221 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5222 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5223 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5224 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5225 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5226 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5227 pFpuCtx->aRegs[7].r80 = r80Tmp;
5228}
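
/*
 * Illustration (informal): IEM keeps aRegs[0..7] in ST(0)..ST(7) order rather
 * than in physical R0..R7 order, so whenever TOP changes the array must be
 * rotated to keep aRegs[0] == ST(0); e.g. after a push the old ST(0) has to be
 * reachable as ST(1), hence the element-by-element rotation above.
 */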
5229
5230
5231/**
5232 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5233 * exception prevents it.
5234 *
5235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5236 * @param pResult The FPU operation result to push.
5237 * @param pFpuCtx The FPU context.
5238 */
5239static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5240{
5241 /* Update FSW and bail if there are pending exceptions afterwards. */
5242 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5243 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5244 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5245 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5246 {
5247        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5248 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
5249 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5250 pFpuCtx->FSW = fFsw;
5251 return;
5252 }
5253
5254 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5255 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5256 {
5257 /* All is fine, push the actual value. */
5258 pFpuCtx->FTW |= RT_BIT(iNewTop);
5259 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5260 }
5261 else if (pFpuCtx->FCW & X86_FCW_IM)
5262 {
5263 /* Masked stack overflow, push QNaN. */
5264 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5265 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5266 }
5267 else
5268 {
5269 /* Raise stack overflow, don't push anything. */
5270 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5271 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5272 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5273 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5274 return;
5275 }
5276
5277 fFsw &= ~X86_FSW_TOP_MASK;
5278 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5279 pFpuCtx->FSW = fFsw;
5280
5281 iemFpuRotateStackPush(pFpuCtx);
5282 RT_NOREF(pVCpu);
5283}
5284
5285
5286/**
5287 * Stores a result in a FPU register and updates the FSW and FTW.
5288 *
5289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5290 * @param pFpuCtx The FPU context.
5291 * @param pResult The result to store.
5292 * @param iStReg Which FPU register to store it in.
5293 */
5294static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5295{
5296 Assert(iStReg < 8);
5297 uint16_t fNewFsw = pFpuCtx->FSW;
5298 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
5299 fNewFsw &= ~X86_FSW_C_MASK;
5300 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5301 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5302 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5303 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5304 pFpuCtx->FSW = fNewFsw;
5305 pFpuCtx->FTW |= RT_BIT(iReg);
5306 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5307 RT_NOREF(pVCpu);
5308}
5309
5310
5311/**
5312 * Only updates the FPU status word (FSW) with the result of the current
5313 * instruction.
5314 *
5315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5316 * @param pFpuCtx The FPU context.
5317 * @param u16FSW The FSW output of the current instruction.
5318 */
5319static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
5320{
5321 uint16_t fNewFsw = pFpuCtx->FSW;
5322 fNewFsw &= ~X86_FSW_C_MASK;
5323 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
5324 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5325        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5326 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5327 pFpuCtx->FSW = fNewFsw;
5328 RT_NOREF(pVCpu);
5329}
5330
5331
5332/**
5333 * Pops one item off the FPU stack if no pending exception prevents it.
5334 *
5335 * @param pFpuCtx The FPU context.
5336 */
5337static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5338{
5339 /* Check pending exceptions. */
5340 uint16_t uFSW = pFpuCtx->FSW;
5341 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5342 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5343 return;
5344
5345    /* TOP++: popping the x87 stack increments TOP (modulo 8). */
5346 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5347 uFSW &= ~X86_FSW_TOP_MASK;
5348 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5349 pFpuCtx->FSW = uFSW;
5350
5351 /* Mark the previous ST0 as empty. */
5352 iOldTop >>= X86_FSW_TOP_SHIFT;
5353 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5354
5355 /* Rotate the registers. */
5356 iemFpuRotateStackPop(pFpuCtx);
5357}
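
/*
 * Note on the TOP arithmetic above (informal): TOP is a 3-bit field, so adding
 * 9 (8 + 1) and masking amounts to TOP + 1 modulo 8; popping the x87 stack
 * increments TOP, mirroring the (TOP + 7) & 7 decrement used when pushing.
 */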
5358
5359
5360/**
5361 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5362 *
5363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5364 * @param pResult The FPU operation result to push.
5365 * @param uFpuOpcode The FPU opcode value.
5366 */
5367void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5368{
5369 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5370 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5371 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5372}
5373
5374
5375/**
5376 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5377 * and sets FPUDP and FPUDS.
5378 *
5379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5380 * @param pResult The FPU operation result to push.
5381 * @param iEffSeg The effective segment register.
5382 * @param GCPtrEff The effective address relative to @a iEffSeg.
5383 * @param uFpuOpcode The FPU opcode value.
5384 */
5385void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5386 uint16_t uFpuOpcode) RT_NOEXCEPT
5387{
5388 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5389 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5390 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5391 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5392}
5393
5394
5395/**
5396 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5397 * unless a pending exception prevents it.
5398 *
5399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5400 * @param pResult The FPU operation result to store and push.
5401 * @param uFpuOpcode The FPU opcode value.
5402 */
5403void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5404{
5405 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5406 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5407
5408 /* Update FSW and bail if there are pending exceptions afterwards. */
5409 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5410 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5411 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5412 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5413 {
5414 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5415 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5416 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5417 pFpuCtx->FSW = fFsw;
5418 return;
5419 }
5420
5421 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5422 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5423 {
5424 /* All is fine, push the actual value. */
5425 pFpuCtx->FTW |= RT_BIT(iNewTop);
5426 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5427 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5428 }
5429 else if (pFpuCtx->FCW & X86_FCW_IM)
5430 {
5431 /* Masked stack overflow, push QNaN. */
5432 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5433 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5434 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5435 }
5436 else
5437 {
5438 /* Raise stack overflow, don't push anything. */
5439 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5440 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5441 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5442 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5443 return;
5444 }
5445
5446 fFsw &= ~X86_FSW_TOP_MASK;
5447 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5448 pFpuCtx->FSW = fFsw;
5449
5450 iemFpuRotateStackPush(pFpuCtx);
5451}
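
/*
 * Informal usage note: this two-result path serves instructions like FPTAN,
 * FSINCOS and FXTRACT, which replace ST(0) and then push a second value;
 * aRegs[0] above is the current ST(0) (ST(1) after the push) and aRegs[7] is
 * the slot that becomes the new ST(0) once the stack is rotated.
 */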
5452
5453
5454/**
5455 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5456 * FOP.
5457 *
5458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5459 * @param pResult The result to store.
5460 * @param iStReg Which FPU register to store it in.
5461 * @param uFpuOpcode The FPU opcode value.
5462 */
5463void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5464{
5465 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5466 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5467 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5468}
5469
5470
5471/**
5472 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5473 * FOP, and then pops the stack.
5474 *
5475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5476 * @param pResult The result to store.
5477 * @param iStReg Which FPU register to store it in.
5478 * @param uFpuOpcode The FPU opcode value.
5479 */
5480void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5481{
5482 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5483 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5484 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5485 iemFpuMaybePopOne(pFpuCtx);
5486}
5487
5488
5489/**
5490 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5491 * FPUDP, and FPUDS.
5492 *
5493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5494 * @param pResult The result to store.
5495 * @param iStReg Which FPU register to store it in.
5496 * @param iEffSeg The effective memory operand selector register.
5497 * @param GCPtrEff The effective memory operand offset.
5498 * @param uFpuOpcode The FPU opcode value.
5499 */
5500void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5501 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5502{
5503 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5504 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5505 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5506 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5507}
5508
5509
5510/**
5511 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5512 * FPUDP, and FPUDS, and then pops the stack.
5513 *
5514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5515 * @param pResult The result to store.
5516 * @param iStReg Which FPU register to store it in.
5517 * @param iEffSeg The effective memory operand selector register.
5518 * @param GCPtrEff The effective memory operand offset.
5519 * @param uFpuOpcode The FPU opcode value.
5520 */
5521void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5522 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5523{
5524 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5525 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5526 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5527 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5528 iemFpuMaybePopOne(pFpuCtx);
5529}
5530
5531
5532/**
5533 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5534 *
5535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5536 * @param uFpuOpcode The FPU opcode value.
5537 */
5538void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5539{
5540 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5541 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5542}
5543
5544
5545/**
5546 * Updates the FSW, FOP, FPUIP, and FPUCS.
5547 *
5548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5549 * @param u16FSW The FSW from the current instruction.
5550 * @param uFpuOpcode The FPU opcode value.
5551 */
5552void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5553{
5554 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5555 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5556 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5557}
5558
5559
5560/**
5561 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5562 *
5563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5564 * @param u16FSW The FSW from the current instruction.
5565 * @param uFpuOpcode The FPU opcode value.
5566 */
5567void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5568{
5569 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5570 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5571 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5572 iemFpuMaybePopOne(pFpuCtx);
5573}
5574
5575
5576/**
5577 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5578 *
5579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5580 * @param u16FSW The FSW from the current instruction.
5581 * @param iEffSeg The effective memory operand selector register.
5582 * @param GCPtrEff The effective memory operand offset.
5583 * @param uFpuOpcode The FPU opcode value.
5584 */
5585void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5586{
5587 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5588 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5589 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5590 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5591}
5592
5593
5594/**
5595 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5596 *
5597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5598 * @param u16FSW The FSW from the current instruction.
5599 * @param uFpuOpcode The FPU opcode value.
5600 */
5601void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5602{
5603 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5604 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5605 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5606 iemFpuMaybePopOne(pFpuCtx);
5607 iemFpuMaybePopOne(pFpuCtx);
5608}
5609
5610
5611/**
5612 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5613 *
5614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5615 * @param u16FSW The FSW from the current instruction.
5616 * @param iEffSeg The effective memory operand selector register.
5617 * @param GCPtrEff The effective memory operand offset.
5618 * @param uFpuOpcode The FPU opcode value.
5619 */
5620void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5621 uint16_t uFpuOpcode) RT_NOEXCEPT
5622{
5623 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5624 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5625 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5626 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5627 iemFpuMaybePopOne(pFpuCtx);
5628}
5629
5630
5631/**
5632 * Worker routine for raising an FPU stack underflow exception.
5633 *
5634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5635 * @param pFpuCtx The FPU context.
5636 * @param iStReg The stack register being accessed.
5637 */
5638static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5639{
5640 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5641 if (pFpuCtx->FCW & X86_FCW_IM)
5642 {
5643 /* Masked underflow. */
5644 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5645 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5646 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5647 if (iStReg != UINT8_MAX)
5648 {
5649 pFpuCtx->FTW |= RT_BIT(iReg);
5650 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5651 }
5652 }
5653 else
5654 {
5655 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5656 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5657 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5658 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5659 }
5660 RT_NOREF(pVCpu);
5661}
5662
5663
5664/**
5665 * Raises a FPU stack underflow exception.
5666 *
5667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5668 * @param iStReg The destination register that should be loaded
5669 * with QNaN if \#IS is not masked. Specify
5670 * UINT8_MAX if none (like for fcom).
5671 * @param uFpuOpcode The FPU opcode value.
5672 */
5673void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5674{
5675 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5676 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5677 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5678}
5679
5680
5681void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5682{
5683 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5684 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5685 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5686 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5687}
5688
5689
5690void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5691{
5692 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5693 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5694 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5695 iemFpuMaybePopOne(pFpuCtx);
5696}
5697
5698
5699void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5700 uint16_t uFpuOpcode) RT_NOEXCEPT
5701{
5702 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5703 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5704 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5705 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5706 iemFpuMaybePopOne(pFpuCtx);
5707}
5708
5709
5710void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5711{
5712 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5713 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5714 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5715 iemFpuMaybePopOne(pFpuCtx);
5716 iemFpuMaybePopOne(pFpuCtx);
5717}
5718
5719
5720void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5721{
5722 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5723 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5724
5725 if (pFpuCtx->FCW & X86_FCW_IM)
5726 {
5727        /* Masked underflow - push QNaN. */
5728 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5729 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5730 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5731 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5732 pFpuCtx->FTW |= RT_BIT(iNewTop);
5733 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5734 iemFpuRotateStackPush(pFpuCtx);
5735 }
5736 else
5737 {
5738 /* Exception pending - don't change TOP or the register stack. */
5739 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5740 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5741 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5742 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5743 }
5744}
5745
5746
5747void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5748{
5749 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5750 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5751
5752 if (pFpuCtx->FCW & X86_FCW_IM)
5753 {
5754        /* Masked underflow - push QNaN. */
5755 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5756 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5757 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5758 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5759 pFpuCtx->FTW |= RT_BIT(iNewTop);
5760 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5761 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5762 iemFpuRotateStackPush(pFpuCtx);
5763 }
5764 else
5765 {
5766 /* Exception pending - don't change TOP or the register stack. */
5767 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5768 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5769 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5770 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5771 }
5772}
5773
5774
5775/**
5776 * Worker routine for raising an FPU stack overflow exception on a push.
5777 *
5778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5779 * @param pFpuCtx The FPU context.
5780 */
5781static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5782{
5783 if (pFpuCtx->FCW & X86_FCW_IM)
5784 {
5785 /* Masked overflow. */
5786 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5787 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5788 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5789 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5790 pFpuCtx->FTW |= RT_BIT(iNewTop);
5791 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5792 iemFpuRotateStackPush(pFpuCtx);
5793 }
5794 else
5795 {
5796 /* Exception pending - don't change TOP or the register stack. */
5797 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5798 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5799 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5800 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5801 }
5802 RT_NOREF(pVCpu);
5803}
5804
5805
5806/**
5807 * Raises a FPU stack overflow exception on a push.
5808 *
5809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5810 * @param uFpuOpcode The FPU opcode value.
5811 */
5812void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5813{
5814 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5815 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5816 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5817}
5818
5819
5820/**
5821 * Raises a FPU stack overflow exception on a push with a memory operand.
5822 *
5823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5824 * @param iEffSeg The effective memory operand selector register.
5825 * @param GCPtrEff The effective memory operand offset.
5826 * @param uFpuOpcode The FPU opcode value.
5827 */
5828void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5829{
5830 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5831 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5832 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5833 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5834}
5835
5836/** @} */
5837
5838
5839/** @name Memory access.
5840 *
5841 * @{
5842 */
5843
5844#undef LOG_GROUP
5845#define LOG_GROUP LOG_GROUP_IEM_MEM
5846
5847/**
5848 * Updates the IEMCPU::cbWritten counter if applicable.
5849 *
5850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5851 * @param fAccess The access being accounted for.
5852 * @param cbMem The access size.
5853 */
5854DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5855{
5856 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5857 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5858 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5859}
5860
5861
5862/**
5863 * Applies the segment limit, base and attributes.
5864 *
5865 * This may raise a \#GP or \#SS.
5866 *
5867 * @returns VBox strict status code.
5868 *
5869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5870 * @param fAccess The kind of access which is being performed.
5871 * @param iSegReg The index of the segment register to apply.
5872 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5873 * TSS, ++).
5874 * @param cbMem The access size.
5875 * @param pGCPtrMem Pointer to the guest memory address to apply
5876 * segmentation to. Input and output parameter.
5877 */
5878VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5879{
5880 if (iSegReg == UINT8_MAX)
5881 return VINF_SUCCESS;
5882
5883 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5884 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5885 switch (IEM_GET_CPU_MODE(pVCpu))
5886 {
5887 case IEMMODE_16BIT:
5888 case IEMMODE_32BIT:
5889 {
5890 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5891 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5892
5893 if ( pSel->Attr.n.u1Present
5894 && !pSel->Attr.n.u1Unusable)
5895 {
5896 Assert(pSel->Attr.n.u1DescType);
5897 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5898 {
5899 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5900 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5901 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5902
5903 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5904 {
5905 /** @todo CPL check. */
5906 }
5907
5908 /*
5909 * There are two kinds of data selectors, normal and expand down.
5910 */
5911 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5912 {
5913 if ( GCPtrFirst32 > pSel->u32Limit
5914 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5915 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5916 }
5917 else
5918 {
5919 /*
5920 * The upper boundary is defined by the B bit, not the G bit!
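                         * (Example: with u32Limit=0xfff and B=1 the valid offsets are
                         * 0x1000 thru 0xffffffff; anything at or below the limit faults.)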
5921 */
5922 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5923 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5924 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5925 }
5926 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5927 }
5928 else
5929 {
5930 /*
5931                     * Code selectors can usually be used to read thru; writing is
5932 * only permitted in real and V8086 mode.
5933 */
5934 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5935 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5936 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5937 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5938 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5939
5940 if ( GCPtrFirst32 > pSel->u32Limit
5941 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5942 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5943
5944 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5945 {
5946 /** @todo CPL check. */
5947 }
5948
5949 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5950 }
5951 }
5952 else
5953 return iemRaiseGeneralProtectionFault0(pVCpu);
5954 return VINF_SUCCESS;
5955 }
5956
5957 case IEMMODE_64BIT:
5958 {
5959 RTGCPTR GCPtrMem = *pGCPtrMem;
5960 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5961 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5962
5963 Assert(cbMem >= 1);
5964 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5965 return VINF_SUCCESS;
5966 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5967 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5968 return iemRaiseGeneralProtectionFault0(pVCpu);
5969 }
5970
5971 default:
5972 AssertFailedReturn(VERR_IEM_IPE_7);
5973 }
5974}
5975
5976
5977/**
5978 * Translates a virtual address to a physical address and checks if we
5979 * can access the page as specified.
5980 *
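 * @returns VBox strict status code.
 *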
5981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5982 * @param GCPtrMem The virtual address.
5983 * @param cbAccess The access size, for raising \#PF correctly for
5984 * FXSAVE and such.
5985 * @param fAccess The intended access.
5986 * @param pGCPhysMem Where to return the physical address.
5987 */
5988VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5989 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5990{
5991 /** @todo Need a different PGM interface here. We're currently using
5992     *        generic / REM interfaces. This won't cut it for R0. */
5993 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5994 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5995 * here. */
5996 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5997 PGMPTWALKFAST WalkFast;
5998 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
5999 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6000 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6001 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
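    /* Note: X86_CR0_WP == PGMQPAGE_F_CR0_WP0 (asserted above), so the XOR below
       yields the WP0 query flag exactly when CR0.WP is clear. */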
6002 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6003 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6004 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6005 fQPage |= PGMQPAGE_F_USER_MODE;
6006 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6007 if (RT_SUCCESS(rc))
6008 {
6009 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6010
6011 /* If the page is writable and does not have the no-exec bit set, all
6012 access is allowed. Otherwise we'll have to check more carefully... */
6013 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
6014 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
6015 || (WalkFast.fEffective & X86_PTE_RW)
6016 || ( ( IEM_GET_CPL(pVCpu) != 3
6017 || (fAccess & IEM_ACCESS_WHAT_SYS))
6018 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
6019 && ( (WalkFast.fEffective & X86_PTE_US)
6020 || IEM_GET_CPL(pVCpu) != 3
6021 || (fAccess & IEM_ACCESS_WHAT_SYS) )
6022 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
6023 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
6024 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
6025 )
6026 );
6027
6028 /* PGMGstQueryPageFast sets the A & D bits. */
6029 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6030 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
6031
6032 *pGCPhysMem = WalkFast.GCPhys;
6033 return VINF_SUCCESS;
6034 }
6035
6036 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6037 /** @todo Check unassigned memory in unpaged mode. */
6038#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6039 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6040 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6041#endif
6042 *pGCPhysMem = NIL_RTGCPHYS;
6043 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
6044}
6045
6046#if 0 /*unused*/
6047/**
6048 * Looks up a memory mapping entry.
6049 *
6050 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6052 * @param pvMem The memory address.
6053 * @param fAccess The access to.
6054 */
6055DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
6056{
6057 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6058 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6059 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
6060 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6061 return 0;
6062 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
6063 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6064 return 1;
6065 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
6066 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6067 return 2;
6068 return VERR_NOT_FOUND;
6069}
6070#endif
6071
6072/**
6073 * Finds a free memmap entry when using iNextMapping doesn't work.
6074 *
6075 * @returns Memory mapping index, 1024 on failure.
6076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6077 */
6078static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
6079{
6080 /*
6081 * The easy case.
6082 */
6083 if (pVCpu->iem.s.cActiveMappings == 0)
6084 {
6085 pVCpu->iem.s.iNextMapping = 1;
6086 return 0;
6087 }
6088
6089 /* There should be enough mappings for all instructions. */
6090 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
6091
6092 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
6093 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6094 return i;
6095
6096 AssertFailedReturn(1024);
6097}
6098
6099
6100/**
6101 * Commits a bounce buffer that needs writing back and unmaps it.
6102 *
6103 * @returns Strict VBox status code.
6104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6105 * @param iMemMap The index of the buffer to commit.
6106 * @param   fPostponeFail   Whether we can postpone write failures to ring-3.
6107 * Always false in ring-3, obviously.
6108 */
6109static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
6110{
6111 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6112 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6113#ifdef IN_RING3
6114 Assert(!fPostponeFail);
6115 RT_NOREF_PV(fPostponeFail);
6116#endif
6117
6118 /*
6119 * Do the writing.
6120 */
6121 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6122 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
6123 {
6124 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
6125 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6126 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6127 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6128 {
6129 /*
6130 * Carefully and efficiently dealing with access handler return
6131 * codes make this a little bloated.
6132             * codes makes this a little bloated.
6133 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6134 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6135 pbBuf,
6136 cbFirst,
6137 PGMACCESSORIGIN_IEM);
6138 if (rcStrict == VINF_SUCCESS)
6139 {
6140 if (cbSecond)
6141 {
6142 rcStrict = PGMPhysWrite(pVM,
6143 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6144 pbBuf + cbFirst,
6145 cbSecond,
6146 PGMACCESSORIGIN_IEM);
6147 if (rcStrict == VINF_SUCCESS)
6148 { /* nothing */ }
6149 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6150 {
6151 LogEx(LOG_GROUP_IEM,
6152 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6153 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6154 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6155 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6156 }
6157#ifndef IN_RING3
6158 else if (fPostponeFail)
6159 {
6160 LogEx(LOG_GROUP_IEM,
6161 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6162 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6163 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6164 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6165 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6166 return iemSetPassUpStatus(pVCpu, rcStrict);
6167 }
6168#endif
6169 else
6170 {
6171 LogEx(LOG_GROUP_IEM,
6172 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6173 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6174 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6175 return rcStrict;
6176 }
6177 }
6178 }
6179 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6180 {
6181 if (!cbSecond)
6182 {
6183 LogEx(LOG_GROUP_IEM,
6184 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6185 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6186 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6187 }
6188 else
6189 {
6190 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6191 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6192 pbBuf + cbFirst,
6193 cbSecond,
6194 PGMACCESSORIGIN_IEM);
6195 if (rcStrict2 == VINF_SUCCESS)
6196 {
6197 LogEx(LOG_GROUP_IEM,
6198 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6199 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6200 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6201 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6202 }
6203 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6204 {
6205 LogEx(LOG_GROUP_IEM,
6206 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6207 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6208 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6209 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6210 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6211 }
6212#ifndef IN_RING3
6213 else if (fPostponeFail)
6214 {
6215 LogEx(LOG_GROUP_IEM,
6216 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6217 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6218 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6219 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6220 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6221 return iemSetPassUpStatus(pVCpu, rcStrict);
6222 }
6223#endif
6224 else
6225 {
6226 LogEx(LOG_GROUP_IEM,
6227 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6228 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6229 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6230 return rcStrict2;
6231 }
6232 }
6233 }
6234#ifndef IN_RING3
6235 else if (fPostponeFail)
6236 {
6237 LogEx(LOG_GROUP_IEM,
6238 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6239 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6240 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6241 if (!cbSecond)
6242 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6243 else
6244 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6245 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6246 return iemSetPassUpStatus(pVCpu, rcStrict);
6247 }
6248#endif
6249 else
6250 {
6251 LogEx(LOG_GROUP_IEM,
6252 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6253 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6254 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6255 return rcStrict;
6256 }
6257 }
6258 else
6259 {
6260 /*
6261 * No access handlers, much simpler.
6262 */
6263 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6264 if (RT_SUCCESS(rc))
6265 {
6266 if (cbSecond)
6267 {
6268 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6269 if (RT_SUCCESS(rc))
6270 { /* likely */ }
6271 else
6272 {
6273 LogEx(LOG_GROUP_IEM,
6274 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6275 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6276 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6277 return rc;
6278 }
6279 }
6280 }
6281 else
6282 {
6283 LogEx(LOG_GROUP_IEM,
6284 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6285 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6286 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6287 return rc;
6288 }
6289 }
6290 }
6291
6292#if defined(IEM_LOG_MEMORY_WRITES)
6293 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6294 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6295 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6296 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6297 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6298 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6299
6300 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6301 g_cbIemWrote = cbWrote;
6302 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6303#endif
6304
6305 /*
6306 * Free the mapping entry.
6307 */
6308 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6309 Assert(pVCpu->iem.s.cActiveMappings != 0);
6310 pVCpu->iem.s.cActiveMappings--;
6311 return VINF_SUCCESS;
6312}
6313
6314
6315/**
6316 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
6317 */
6318DECL_FORCE_INLINE(uint32_t)
6319iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
6320{
6321 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
6322 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6323 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6324 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6325}
6326
6327
6328/**
6329 * iemMemMap worker that deals with a request crossing pages.
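 *
 * For example, an 8 byte access starting 4 bytes before a page boundary is split
 * into cbFirstPage=4 on the first page and cbSecondPage=4 on the second page,
 * with both halves served from the bounce buffer.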
6330 */
6331static VBOXSTRICTRC
6332iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6333 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6334{
6335 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
6336 Assert(cbMem <= GUEST_PAGE_SIZE);
6337
6338 /*
6339 * Do the address translations.
6340 */
6341 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6342 RTGCPHYS GCPhysFirst;
6343 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6344 if (rcStrict != VINF_SUCCESS)
6345 return rcStrict;
6346 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6347
6348 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6349 RTGCPHYS GCPhysSecond;
6350 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6351 cbSecondPage, fAccess, &GCPhysSecond);
6352 if (rcStrict != VINF_SUCCESS)
6353 return rcStrict;
6354 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6355 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6356
6357 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6358
6359 /*
6360 * Check for data breakpoints.
6361 */
6362 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
6363 { /* likely */ }
6364 else
6365 {
6366 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
6367 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6368 cbSecondPage, fAccess);
6369 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6370 if (fDataBps > 1)
6371 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6372 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6373 }
6374
6375 /*
6376 * Read in the current memory content if it's a read, execute or partial
6377 * write access.
6378 */
6379 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6380
6381 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6382 {
6383 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6384 {
6385 /*
6386 * Must carefully deal with access handler status codes here,
6387             * which makes the code a bit bloated.
6388 */
6389 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6390 if (rcStrict == VINF_SUCCESS)
6391 {
6392 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6393 if (rcStrict == VINF_SUCCESS)
6394 { /*likely */ }
6395 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6396 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6397 else
6398 {
6399                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6400 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6401 return rcStrict;
6402 }
6403 }
6404 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6405 {
6406 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6407 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6408 {
6409 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6410 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6411 }
6412 else
6413 {
6414 LogEx(LOG_GROUP_IEM,
6415                          ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6416                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6417 return rcStrict2;
6418 }
6419 }
6420 else
6421 {
6422                LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6423 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6424 return rcStrict;
6425 }
6426 }
6427 else
6428 {
6429 /*
6430             * No informational status codes here, much more straightforward.
6431 */
6432 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6433 if (RT_SUCCESS(rc))
6434 {
6435 Assert(rc == VINF_SUCCESS);
6436 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6437 if (RT_SUCCESS(rc))
6438 Assert(rc == VINF_SUCCESS);
6439 else
6440 {
6441 LogEx(LOG_GROUP_IEM,
6442                          ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6443 return rc;
6444 }
6445 }
6446 else
6447 {
6448 LogEx(LOG_GROUP_IEM,
6449                      ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6450 return rc;
6451 }
6452 }
6453 }
6454#ifdef VBOX_STRICT
6455 else
6456 memset(pbBuf, 0xcc, cbMem);
6457 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6458 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6459#endif
6460 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6461
6462 /*
6463 * Commit the bounce buffer entry.
6464 */
6465 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6466 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6467 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6468 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6469 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6470 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6471 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6472 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6473 pVCpu->iem.s.cActiveMappings++;
6474
6475 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6476 *ppvMem = pbBuf;
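    /* bUnmapInfo layout: bits 2:0 = mapping index, bit 3 = valid marker,
       bits 7:4 = the IEM_ACCESS_TYPE_XXX bits (re-checked by the unmap code). */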
6477 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6478 return VINF_SUCCESS;
6479}
6480
6481
6482/**
6483 * iemMemMap worker that deals with iemMemPageMap failures.
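 *
 * Only VERR_PGM_PHYS_TLB_CATCH_WRITE, VERR_PGM_PHYS_TLB_CATCH_ALL and
 * VERR_PGM_PHYS_TLB_UNASSIGNED are handled by bounce buffering; any other
 * status is passed straight back to the caller.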
6484 */
6485static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6486 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6487{
6488 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
6489
6490 /*
6491     * Filter out the conditions we cannot handle here as well as the ones which shouldn't happen at all.
6492 */
6493 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6494 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6495 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6496 {
6497 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6498 return rcMap;
6499 }
6500 pVCpu->iem.s.cPotentialExits++;
6501
6502 /*
6503 * Read in the current memory content if it's a read, execute or partial
6504 * write access.
6505 */
6506 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6507 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6508 {
6509 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6510 memset(pbBuf, 0xff, cbMem);
6511 else
6512 {
6513 int rc;
6514 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6515 {
6516 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6517 if (rcStrict == VINF_SUCCESS)
6518 { /* nothing */ }
6519 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6520 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6521 else
6522 {
6523 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6524 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6525 return rcStrict;
6526 }
6527 }
6528 else
6529 {
6530 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6531 if (RT_SUCCESS(rc))
6532 { /* likely */ }
6533 else
6534 {
6535 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6536 GCPhysFirst, rc));
6537 return rc;
6538 }
6539 }
6540 }
6541 }
6542#ifdef VBOX_STRICT
6543    else
6544        memset(pbBuf, 0xcc, cbMem);
6547    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6548        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6549#endif
6550
6551 /*
6552 * Commit the bounce buffer entry.
6553 */
6554 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6555 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6556 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6557 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6558 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6559 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6560 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6561 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6562 pVCpu->iem.s.cActiveMappings++;
6563
6564 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6565 *ppvMem = pbBuf;
6566 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6567 return VINF_SUCCESS;
6568}
6569
6570
6571
6572/**
6573 * Maps the specified guest memory for the given kind of access.
6574 *
6575 * This may be using bounce buffering of the memory if it's crossing a page
6576 * boundary or if there is an access handler installed for any of it. Because
6577 * of lock prefix guarantees, we're in for some extra clutter when this
6578 * happens.
6579 *
6580 * This may raise a \#GP, \#SS, \#PF or \#AC.
6581 *
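 * A minimal usage sketch for a 4 byte data write (illustrative only, not a
 * specific caller; IEM_ACCESS_DATA_W is assumed to be defined in IEMInternal.h
 * and GCPtrMem/u32Value are placeholders):
 * @code
 *      uint32_t    *pu32Dst;
 *      uint8_t      bUnmapInfo;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, &bUnmapInfo, sizeof(*pu32Dst),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *      }
 * @endcode
 *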
6582 * @returns VBox strict status code.
6583 *
6584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6585 * @param ppvMem Where to return the pointer to the mapped memory.
6586 * @param pbUnmapInfo Where to return unmap info to be passed to
6587 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6588 * done.
6589 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6590 * 8, 12, 16, 32 or 512. When used by string operations
6591 * it can be up to a page.
6592 * @param iSegReg The index of the segment register to use for this
6593 * access. The base and limits are checked. Use UINT8_MAX
6594 * to indicate that no segmentation is required (for IDT,
6595 * GDT and LDT accesses).
6596 * @param GCPtrMem The address of the guest memory.
6597 * @param fAccess How the memory is being accessed. The
6598 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6599 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6600 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6601 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6602 * set.
6603 * @param uAlignCtl Alignment control:
6604 * - Bits 15:0 is the alignment mask.
6605 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6606 * IEM_MEMMAP_F_ALIGN_SSE, and
6607 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6608 * Pass zero to skip alignment.
6609 */
6610VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6611 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6612{
6613 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);
6614
6615 /*
6616 * Check the input and figure out which mapping entry to use.
6617 */
6618 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6619 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6620 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6621 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6622 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6623
6624 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6625 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6626 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6627 {
6628 iMemMap = iemMemMapFindFree(pVCpu);
6629 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6630 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6631 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6632 pVCpu->iem.s.aMemMappings[2].fAccess),
6633 VERR_IEM_IPE_9);
6634 }
6635
6636 /*
6637 * Map the memory, checking that we can actually access it. If something
6638 * slightly complicated happens, fall back on bounce buffering.
6639 */
6640 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6641 if (rcStrict == VINF_SUCCESS)
6642 { /* likely */ }
6643 else
6644 return rcStrict;
6645
6646 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6647 { /* likely */ }
6648 else
6649 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6650
6651 /*
6652 * Alignment check.
6653 */
6654 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6655 { /* likelyish */ }
6656 else
6657 {
6658 /* Misaligned access. */
6659 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6660 {
6661 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6662 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6663 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6664 {
6665 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6666
6667 if (!iemMemAreAlignmentChecksEnabled(pVCpu))
6668 { /* likely */ }
6669 else
6670 return iemRaiseAlignmentCheckException(pVCpu);
6671 }
6672 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6673 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6674 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6675 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6676 * that's what FXSAVE does on a 10980xe. */
6677 && iemMemAreAlignmentChecksEnabled(pVCpu))
6678 return iemRaiseAlignmentCheckException(pVCpu);
6679 else
6680 return iemRaiseGeneralProtectionFault0(pVCpu);
6681 }
6682
6683#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6684        /* If the access is atomic there are host platform alignment restrictions
6685 we need to conform with. */
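        /* (Hypothetical example: on AMD64 an 8 byte atomic access at an address with
           (GCPtrMem & 63) == 60 straddles a cache line and is rejected with
           VINF_EM_EMULATE_SPLIT_LOCK below.) */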
6686 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6687# if defined(RT_ARCH_AMD64)
6688 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6689# elif defined(RT_ARCH_ARM64)
6690 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6691# else
6692# error port me
6693# endif
6694 )
6695 { /* okay */ }
6696 else
6697 {
6698 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6699 pVCpu->iem.s.cMisalignedAtomics += 1;
6700 return VINF_EM_EMULATE_SPLIT_LOCK;
6701 }
6702#endif
6703 }
6704
6705#ifdef IEM_WITH_DATA_TLB
6706 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6707
6708 /*
6709 * Get the TLB entry for this page and check PT flags.
6710 *
6711 * We reload the TLB entry if we need to set the dirty bit (accessed
6712 * should in theory always be set).
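     *
     * The TLB keeps entries in pairs: the even entry is tagged with the
     * non-global revision (uTlbRevision), the odd entry (pTlbe + 1) with the
     * global revision (uTlbRevisionGlobal).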
6713 */
6714 uint8_t *pbMem = NULL;
6715 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
6716 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
6717 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
6718 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
6719 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
6720 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
6721 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
6722 {
6723# ifdef IEM_WITH_TLB_STATISTICS
6724 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
6725# endif
6726
6727 /* If the page is either supervisor only or non-writable, we need to do
6728 more careful access checks. */
6729 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6730 {
6731 /* Write to read only memory? */
6732 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6733 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6734 && ( ( IEM_GET_CPL(pVCpu) == 3
6735 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6736 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6737 {
6738 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6739 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6740 }
6741
6742 /* Kernel memory accessed by userland? */
6743 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6744 && IEM_GET_CPL(pVCpu) == 3
6745 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6746 {
6747 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6748 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6749 }
6750 }
6751
6752 /* Look up the physical page info if necessary. */
6753 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6754# ifdef IN_RING3
6755 pbMem = pTlbe->pbMappingR3;
6756# else
6757 pbMem = NULL;
6758# endif
6759 else
6760 {
6761 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6762 { /* likely */ }
6763 else
6764 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6765 pTlbe->pbMappingR3 = NULL;
6766 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6767 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6768 &pbMem, &pTlbe->fFlagsAndPhysRev);
6769 AssertRCReturn(rc, rc);
6770# ifdef IN_RING3
6771 pTlbe->pbMappingR3 = pbMem;
6772# endif
6773 }
6774 }
6775 else
6776 {
6777 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
6778
6779        /* This page table walking will set A and D bits as required by the access while performing the walk.
6780           ASSUMES these are set when the address is translated rather than on commit... */
6781        /** @todo testcase: check when A and D bits are actually set by the CPU. */
6782 PGMPTWALKFAST WalkFast;
6783 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6784 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6785 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6786 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6787 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6788 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6789 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6790 fQPage |= PGMQPAGE_F_USER_MODE;
6791 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6792 if (RT_SUCCESS(rc))
6793 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6794 else
6795 {
6796 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6797# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6798 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6799 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6800# endif
6801 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6802 }
6803
6804 uint32_t fDataBps;
6805 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
6806 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
6807 {
6808 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
6809 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
6810 {
6811 pTlbe--;
6812 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
6813 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
6814 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
6815 }
6816 else
6817 {
6818 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
6819 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
6820 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
6821 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
6822 }
6823 }
6824 else
6825 {
6826 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
6827 to the page with the data access breakpoint armed on it to pass thru here. */
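            /* (The dummy entry lives outside the TLB arrays, so the next access to
               this page misses again and the breakpoint is re-checked.) */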
6828 if (fDataBps > 1)
6829 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6830 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6831 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6832 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
6833 pTlbe->uTag = uTagNoRev;
6834 }
6835 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
6836 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
6837 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6838 pTlbe->GCPhys = GCPhysPg;
6839 pTlbe->pbMappingR3 = NULL;
6840 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
6841 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6842 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6843 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6844 || IEM_GET_CPL(pVCpu) != 3
6845 || (fAccess & IEM_ACCESS_WHAT_SYS));
6846
6847 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
6848 {
6849 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
6850 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
6851 else
6852 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
6853 }
6854
6855 /* Resolve the physical address. */
6856 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6857 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6858 &pbMem, &pTlbe->fFlagsAndPhysRev);
6859 AssertRCReturn(rc, rc);
6860# ifdef IN_RING3
6861 pTlbe->pbMappingR3 = pbMem;
6862# endif
6863 }
6864
6865 /*
6866 * Check the physical page level access and mapping.
6867 */
6868 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6869 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6870 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6871 { /* probably likely */ }
6872 else
6873 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6874 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6875 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6876 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6877 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6878 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6879
6880 if (pbMem)
6881 {
6882 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6883 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6884 fAccess |= IEM_ACCESS_NOT_LOCKED;
6885 }
6886 else
6887 {
6888 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6889 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6890 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6891 if (rcStrict != VINF_SUCCESS)
6892 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6893 }
6894
6895 void * const pvMem = pbMem;
6896
6897 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6898 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6899 if (fAccess & IEM_ACCESS_TYPE_READ)
6900 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6901
6902#else /* !IEM_WITH_DATA_TLB */
6903
6904 RTGCPHYS GCPhysFirst;
6905 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6906 if (rcStrict != VINF_SUCCESS)
6907 return rcStrict;
6908
6909 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6910 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6911 if (fAccess & IEM_ACCESS_TYPE_READ)
6912 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6913
6914 void *pvMem;
6915 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6916 if (rcStrict != VINF_SUCCESS)
6917 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6918
6919#endif /* !IEM_WITH_DATA_TLB */
6920
6921 /*
6922 * Fill in the mapping table entry.
6923 */
6924 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6925 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6926 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6927 pVCpu->iem.s.cActiveMappings += 1;
6928
6929 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6930 *ppvMem = pvMem;
6931 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6932 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6933 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6934
6935 return VINF_SUCCESS;
6936}
6937
6938
6939/**
6940 * Commits the guest memory if bounce buffered and unmaps it.
6941 *
6942 * @returns Strict VBox status code.
6943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6944 * @param bUnmapInfo Unmap info set by iemMemMap.
6945 */
6946VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6947{
6948 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6949 AssertMsgReturn( (bUnmapInfo & 0x08)
6950 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6951 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6952 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6953 VERR_NOT_FOUND);
6954
6955 /* If it's bounce buffered, we may need to write back the buffer. */
6956 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6957 {
6958 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6959 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6960 }
6961 /* Otherwise unlock it. */
6962 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6963 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6964
6965 /* Free the entry. */
6966 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6967 Assert(pVCpu->iem.s.cActiveMappings != 0);
6968 pVCpu->iem.s.cActiveMappings--;
6969 return VINF_SUCCESS;
6970}
6971
6972
6973/**
6974 * Rolls back the guest memory (conceptually only) and unmaps it.
6975 *
6976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6977 * @param bUnmapInfo Unmap info set by iemMemMap.
6978 */
6979void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6980{
6981 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6982 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6983 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6984 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6985 == ((unsigned)bUnmapInfo >> 4),
6986 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6987
6988 /* Unlock it if necessary. */
6989 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6990 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6991
6992 /* Free the entry. */
6993 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6994 Assert(pVCpu->iem.s.cActiveMappings != 0);
6995 pVCpu->iem.s.cActiveMappings--;
6996}
6997
6998#ifdef IEM_WITH_SETJMP
6999
7000/**
7001 * Maps the specified guest memory for the given kind of access, longjmp on
7002 * error.
7003 *
7004 * This may be using bounce buffering of the memory if it's crossing a page
7005 * boundary or if there is an access handler installed for any of it. Because
7006 * of lock prefix guarantees, we're in for some extra clutter when this
7007 * happens.
7008 *
7009 * This may raise a \#GP, \#SS, \#PF or \#AC.
7010 *
7011 * @returns Pointer to the mapped memory.
7012 *
7013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7014 * @param bUnmapInfo Where to return unmap info to be passed to
7015 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
7016 * iemMemCommitAndUnmapWoSafeJmp,
7017 * iemMemCommitAndUnmapRoSafeJmp,
7018 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
7019 * when done.
7020 * @param cbMem The number of bytes to map. This is usually 1,
7021 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7022 * string operations it can be up to a page.
7023 * @param iSegReg The index of the segment register to use for
7024 * this access. The base and limits are checked.
7025 * Use UINT8_MAX to indicate that no segmentation
7026 * is required (for IDT, GDT and LDT accesses).
7027 * @param GCPtrMem The address of the guest memory.
7028 * @param fAccess How the memory is being accessed. The
7029 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
7030 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
7031 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
7032 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
7033 * set.
7034 * @param uAlignCtl Alignment control:
7035 * - Bits 15:0 is the alignment mask.
7036 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
7037 * IEM_MEMMAP_F_ALIGN_SSE, and
7038 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
7039 * Pass zero to skip alignment.
7040 * @tparam  a_fSafeCall Whether this is a call from a "safe" fallback function in
7041 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
7042 * needs counting as such in the statistics.
7043 */
7044template<bool a_fSafeCall = false>
7045static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7046 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7047{
7048 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
7049
7050 /*
7051 * Check the input, check segment access and adjust address
7052 * with segment base.
7053 */
7054 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7055 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
7056 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7057
7058 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7059 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7060 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7061
7062 /*
7063 * Alignment check.
7064 */
7065 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
7066 { /* likelyish */ }
7067 else
7068 {
7069 /* Misaligned access. */
7070 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7071 {
7072 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
7073 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
7074 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
7075 {
7076 AssertCompile(X86_CR0_AM == X86_EFL_AC);
7077
7078 if (iemMemAreAlignmentChecksEnabled(pVCpu))
7079 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7080 }
7081 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
7082 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
7083 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
7084 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
7085 * that's what FXSAVE does on a 10980xe. */
7086 && iemMemAreAlignmentChecksEnabled(pVCpu))
7087 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7088 else
7089 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
7090 }
7091
7092#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
7093        /* If the access is atomic there are host platform alignment restrictions
7094 we need to conform with. */
7095 if ( !(fAccess & IEM_ACCESS_ATOMIC)
7096# if defined(RT_ARCH_AMD64)
7097 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
7098# elif defined(RT_ARCH_ARM64)
7099 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
7100# else
7101# error port me
7102# endif
7103 )
7104 { /* okay */ }
7105 else
7106 {
7107 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
7108 pVCpu->iem.s.cMisalignedAtomics += 1;
7109 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
7110 }
7111#endif
7112 }
7113
7114 /*
7115 * Figure out which mapping entry to use.
7116 */
7117 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
7118 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7119 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7120 {
7121 iMemMap = iemMemMapFindFree(pVCpu);
7122 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
7123 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
7124 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
7125 pVCpu->iem.s.aMemMappings[2].fAccess),
7126 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
7127 }
7128
7129 /*
7130 * Crossing a page boundary?
7131 */
7132 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
7133 { /* No (likely). */ }
7134 else
7135 {
7136 void *pvMem;
7137 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
7138 if (rcStrict == VINF_SUCCESS)
7139 return pvMem;
7140 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7141 }
7142
7143#ifdef IEM_WITH_DATA_TLB
7144 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
7145
7146 /*
7147 * Get the TLB entry for this page checking that it has the A & D bits
7148 * set as per fAccess flags.
7149 */
7150 /** @todo make the caller pass these in with fAccess. */
7151 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
7152 ? IEMTLBE_F_PT_NO_USER : 0;
7153 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
7154 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
7155 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
7156 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7157 ? IEMTLBE_F_PT_NO_WRITE : 0)
7158 : 0;
7159 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
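    /* fNoUser, fNoWriteNoDirty and fNoRead collect the IEMTLBE_F_* bits that must
       be clear in the TLB entry for this access; they are folded into the combined
       revision and flags check further down. */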
7160 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
7161 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
7162 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
7163 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
7164 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
7165 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
7166 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
7167 {
7168# ifdef IEM_WITH_TLB_STATISTICS
7169 if (a_fSafeCall)
7170 pVCpu->iem.s.DataTlb.cTlbSafeHits++;
7171 else
7172 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
7173# endif
7174 }
7175 else
7176 {
7177 if (a_fSafeCall)
7178 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
7179 else
7180 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
7181
7182 /* This page table walking will set A and D bits as required by the
7183 access while performing the walk.
7184 ASSUMES these are set when the address is translated rather than on commit... */
7185 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7186 PGMPTWALKFAST WalkFast;
7187 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7188 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7189 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7190 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7191 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7192 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7193 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7194 fQPage |= PGMQPAGE_F_USER_MODE;
7195 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7196 if (RT_SUCCESS(rc))
7197 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7198 else
7199 {
7200 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7201# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7202 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
7203            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7204# endif
7205 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7206 }
7207
7208 uint32_t fDataBps;
7209 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7210 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7211 {
7212 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7213 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7214 {
7215 pTlbe--;
7216 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7217 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7218 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7219 }
7220 else
7221 {
7222 if (a_fSafeCall)
7223 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
7224 else
7225 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7226 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7227 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7228 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7229 }
7230 }
7231 else
7232 {
7233 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7234 to the page with the data access breakpoint armed on it to pass thru here. */
7235 if (fDataBps > 1)
7236 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7237 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7238 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7239 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7240 pTlbe->uTag = uTagNoRev;
7241 }
7242 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
7243 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
7244 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7245 pTlbe->GCPhys = GCPhysPg;
7246 pTlbe->pbMappingR3 = NULL;
7247 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7248 Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
7249 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
7250
7251 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
7252 {
7253 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
7254 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
7255 else
7256 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
7257 }
7258
7259 /* Resolve the physical address. */
7260 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7261 uint8_t *pbMemFullLoad = NULL;
7262 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7263 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
7264 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7265# ifdef IN_RING3
7266 pTlbe->pbMappingR3 = pbMemFullLoad;
7267# endif
7268 }
7269
7270 /*
7271 * Check the flags and physical revision.
7272 * Note! This will revalidate the uTlbPhysRev after a full load. This is
7273 * just to keep the code structure simple (i.e. avoid gotos or similar).
7274 */
7275 uint8_t *pbMem;
7276 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
7277 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7278# ifdef IN_RING3
7279 pbMem = pTlbe->pbMappingR3;
7280# else
7281 pbMem = NULL;
7282# endif
7283 else
7284 {
7285 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7286
7287 /*
7288 * Okay, something isn't quite right or needs refreshing.
7289 */
7290 /* Write to read only memory? */
7291 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
7292 {
7293 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7294# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7295/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
7296 * to trigger a \#PF or a VM nested paging exit here yet! */
7297 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7298 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7299# endif
7300 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7301 }
7302
7303 /* Kernel memory accessed by userland? */
7304 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
7305 {
7306 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7307# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7308/** @todo TLB: See above. */
7309 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7310 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7311# endif
7312 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7313 }
7314
7315 /*
7316 * Check if the physical page info needs updating.
7317 */
7318 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7319# ifdef IN_RING3
7320 pbMem = pTlbe->pbMappingR3;
7321# else
7322 pbMem = NULL;
7323# endif
7324 else
7325 {
7326 pTlbe->pbMappingR3 = NULL;
7327 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7328 pbMem = NULL;
7329 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7330 &pbMem, &pTlbe->fFlagsAndPhysRev);
7331 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7332# ifdef IN_RING3
7333 pTlbe->pbMappingR3 = pbMem;
7334# endif
7335 }
7336
7337 /*
7338 * Check the physical page level access and mapping.
7339 */
7340 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
7341 { /* probably likely */ }
7342 else
7343 {
7344 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
7345 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7346 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7347 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7348 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7349 if (rcStrict == VINF_SUCCESS)
7350 return pbMem;
7351 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7352 }
7353 }
7354 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7355
7356 if (pbMem)
7357 {
7358 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7359 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7360 fAccess |= IEM_ACCESS_NOT_LOCKED;
7361 }
7362 else
7363 {
7364 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7365 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7366 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7367 if (rcStrict == VINF_SUCCESS)
7368 {
7369 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7370 return pbMem;
7371 }
7372 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7373 }
7374
7375 void * const pvMem = pbMem;
7376
7377 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7378 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7379 if (fAccess & IEM_ACCESS_TYPE_READ)
7380 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7381
7382#else /* !IEM_WITH_DATA_TLB */
7383
7384
7385 RTGCPHYS GCPhysFirst;
7386 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7387 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7388 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7389
7390 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7391 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7392 if (fAccess & IEM_ACCESS_TYPE_READ)
7393 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7394
7395 void *pvMem;
7396 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7397 if (rcStrict == VINF_SUCCESS)
7398 { /* likely */ }
7399 else
7400 {
7401 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7402 if (rcStrict == VINF_SUCCESS)
7403 return pvMem;
7404 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7405 }
7406
7407#endif /* !IEM_WITH_DATA_TLB */
7408
7409 /*
7410 * Fill in the mapping table entry.
7411 */
7412 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7413 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7414 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7415 pVCpu->iem.s.cActiveMappings++;
7416
7417 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
7418
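    /* The unmap cookie: bits 0-2 hold the mapping index, bit 3 is a validity marker,
       and bits 4-7 the access type (cf. the checks in iemMemCommitAndUnmapJmp). */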
7419 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7420 return pvMem;
7421}
7422
7423
7424/** @see iemMemMapJmp */
7425static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7426 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7427{
7428 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
7429}
7430
7431
7432/**
7433 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
7434 *
7435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7436 * @param   bUnmapInfo  Unmap info returned by iemMemMapJmp or one of its
7437 *                      wrappers for the mapping being committed.
7438 */
7439void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7440{
7441 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7442 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7443 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7444 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7445 == ((unsigned)bUnmapInfo >> 4),
7446 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7447
7448 /* If it's bounce buffered, we may need to write back the buffer. */
7449 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7450 {
7451 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7452 {
7453 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7454 if (rcStrict == VINF_SUCCESS)
7455 return;
7456 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7457 }
7458 }
7459 /* Otherwise unlock it. */
7460 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7461 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7462
7463 /* Free the entry. */
7464 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7465 Assert(pVCpu->iem.s.cActiveMappings != 0);
7466 pVCpu->iem.s.cActiveMappings--;
7467}
7468
7469
7470/** Fallback for iemMemCommitAndUnmapRwJmp. */
7471void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7472{
7473 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7474 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7475}
7476
7477
7478/** Fallback for iemMemCommitAndUnmapAtJmp. */
7479void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7480{
7481 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7482 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7483}
7484
7485
7486/** Fallback for iemMemCommitAndUnmapWoJmp. */
7487void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7488{
7489 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7490 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7491}
7492
7493
7494/** Fallback for iemMemCommitAndUnmapRoJmp. */
7495void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7496{
7497 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7498 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7499}
7500
7501
7502/** Fallback for iemMemRollbackAndUnmapWo. */
7503void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7504{
7505 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7506 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7507}
7508
7509#endif /* IEM_WITH_SETJMP */
7510
7511#ifndef IN_RING3
7512/**
7513 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
7514 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7515 * buffer part runs into trouble, the write back is postponed to ring-3 (sets FF and stuff).
7516 * Allows the instruction to be completed and retired, while the IEM user will
7517 * return to ring-3 immediately afterwards and do the postponed writes there.
7518 *
7519 * @returns VBox status code (no strict statuses). Caller must check
7520 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7522 * @param   bUnmapInfo  Unmap info returned by the iemMemMap call that
7523 *                      created the mapping being committed.
7524 */
7525VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7526{
7527 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7528 AssertMsgReturn( (bUnmapInfo & 0x08)
7529 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7530 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7531 == ((unsigned)bUnmapInfo >> 4),
7532 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7533 VERR_NOT_FOUND);
7534
7535 /* If it's bounce buffered, we may need to write back the buffer. */
7536 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7537 {
7538 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7539 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7540 }
7541 /* Otherwise unlock it. */
7542 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7543 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7544
7545 /* Free the entry. */
7546 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7547 Assert(pVCpu->iem.s.cActiveMappings != 0);
7548 pVCpu->iem.s.cActiveMappings--;
7549 return VINF_SUCCESS;
7550}
7551#endif
7552
7553
7554/**
7555 * Rolls back mappings, releasing page locks and such.
7556 *
7557 * The caller shall only call this after checking cActiveMappings.
7558 *
7559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7560 */
7561void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7562{
7563 Assert(pVCpu->iem.s.cActiveMappings > 0);
7564
7565 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7566 while (iMemMap-- > 0)
7567 {
7568 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7569 if (fAccess != IEM_ACCESS_INVALID)
7570 {
7571 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7572 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7573 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7574 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7575 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7576 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7577 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7578 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7579 pVCpu->iem.s.cActiveMappings--;
7580 }
7581 }
7582}
7583
7584
7585/*
7586 * Instantiate R/W templates.
7587 */
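/* Each inclusion of IEMAllMemRWTmpl.cpp.h below is expected to expand into the
   fetch/store (and, while TMPL_MEM_WITH_STACK is defined, stack push/pop) helpers
   for the given TMPL_MEM_TYPE / TMPL_MEM_FN_SUFF pair; e.g. the uint32_t/U32
   instantiation provides the iemMemFetchDataU32 and iemMemStoreDataU32 helpers
   used further down in this file. */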
7588#define TMPL_MEM_WITH_STACK
7589
7590#define TMPL_MEM_TYPE uint8_t
7591#define TMPL_MEM_FN_SUFF U8
7592#define TMPL_MEM_FMT_TYPE "%#04x"
7593#define TMPL_MEM_FMT_DESC "byte"
7594#include "IEMAllMemRWTmpl.cpp.h"
7595
7596#define TMPL_MEM_TYPE uint16_t
7597#define TMPL_MEM_FN_SUFF U16
7598#define TMPL_MEM_FMT_TYPE "%#06x"
7599#define TMPL_MEM_FMT_DESC "word"
7600#include "IEMAllMemRWTmpl.cpp.h"
7601
7602#define TMPL_WITH_PUSH_SREG
7603#define TMPL_MEM_TYPE uint32_t
7604#define TMPL_MEM_FN_SUFF U32
7605#define TMPL_MEM_FMT_TYPE "%#010x"
7606#define TMPL_MEM_FMT_DESC "dword"
7607#include "IEMAllMemRWTmpl.cpp.h"
7608#undef TMPL_WITH_PUSH_SREG
7609
7610#define TMPL_MEM_TYPE uint64_t
7611#define TMPL_MEM_FN_SUFF U64
7612#define TMPL_MEM_FMT_TYPE "%#018RX64"
7613#define TMPL_MEM_FMT_DESC "qword"
7614#include "IEMAllMemRWTmpl.cpp.h"
7615
7616#undef TMPL_MEM_WITH_STACK
7617
7618#define TMPL_MEM_TYPE uint32_t
7619#define TMPL_MEM_TYPE_ALIGN 0
7620#define TMPL_MEM_FN_SUFF U32NoAc
7621#define TMPL_MEM_FMT_TYPE "%#010x"
7622#define TMPL_MEM_FMT_DESC "dword"
7623#include "IEMAllMemRWTmpl.cpp.h"
7624#undef TMPL_WITH_PUSH_SREG
7625
7626#define TMPL_MEM_TYPE uint64_t
7627#define TMPL_MEM_TYPE_ALIGN 0
7628#define TMPL_MEM_FN_SUFF U64NoAc
7629#define TMPL_MEM_FMT_TYPE "%#018RX64"
7630#define TMPL_MEM_FMT_DESC "qword"
7631#include "IEMAllMemRWTmpl.cpp.h"
7632
7633#define TMPL_MEM_TYPE uint64_t
7634#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7635#define TMPL_MEM_FN_SUFF U64AlignedU128
7636#define TMPL_MEM_FMT_TYPE "%#018RX64"
7637#define TMPL_MEM_FMT_DESC "qword"
7638#include "IEMAllMemRWTmpl.cpp.h"
7639
7640/* See IEMAllMemRWTmplInline.cpp.h */
7641#define TMPL_MEM_BY_REF
7642
7643#define TMPL_MEM_TYPE RTFLOAT80U
7644#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7645#define TMPL_MEM_FN_SUFF R80
7646#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7647#define TMPL_MEM_FMT_DESC "tword"
7648#include "IEMAllMemRWTmpl.cpp.h"
7649
7650#define TMPL_MEM_TYPE RTPBCD80U
7651#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7652#define TMPL_MEM_FN_SUFF D80
7653#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7654#define TMPL_MEM_FMT_DESC "tword"
7655#include "IEMAllMemRWTmpl.cpp.h"
7656
7657#define TMPL_MEM_TYPE RTUINT128U
7658#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7659#define TMPL_MEM_FN_SUFF U128
7660#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7661#define TMPL_MEM_FMT_DESC "dqword"
7662#include "IEMAllMemRWTmpl.cpp.h"
7663
7664#define TMPL_MEM_TYPE RTUINT128U
7665#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7666#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7667#define TMPL_MEM_FN_SUFF U128AlignedSse
7668#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7669#define TMPL_MEM_FMT_DESC "dqword"
7670#include "IEMAllMemRWTmpl.cpp.h"
7671
7672#define TMPL_MEM_TYPE RTUINT128U
7673#define TMPL_MEM_TYPE_ALIGN 0
7674#define TMPL_MEM_FN_SUFF U128NoAc
7675#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7676#define TMPL_MEM_FMT_DESC "dqword"
7677#include "IEMAllMemRWTmpl.cpp.h"
7678
7679#define TMPL_MEM_TYPE RTUINT256U
7680#define TMPL_MEM_TYPE_ALIGN 0
7681#define TMPL_MEM_FN_SUFF U256NoAc
7682#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7683#define TMPL_MEM_FMT_DESC "qqword"
7684#include "IEMAllMemRWTmpl.cpp.h"
7685
7686#define TMPL_MEM_TYPE RTUINT256U
7687#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7688#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7689#define TMPL_MEM_FN_SUFF U256AlignedAvx
7690#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7691#define TMPL_MEM_FMT_DESC "qqword"
7692#include "IEMAllMemRWTmpl.cpp.h"
7693
7694/**
7695 * Fetches a data dword and zero extends it to a qword.
7696 *
7697 * @returns Strict VBox status code.
7698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7699 * @param pu64Dst Where to return the qword.
7700 * @param iSegReg The index of the segment register to use for
7701 * this access. The base and limits are checked.
7702 * @param GCPtrMem The address of the guest memory.
7703 */
7704VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7705{
7706 /* The lazy approach for now... */
7707 uint8_t bUnmapInfo;
7708 uint32_t const *pu32Src;
7709 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7710 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7711 if (rc == VINF_SUCCESS)
7712 {
7713 *pu64Dst = *pu32Src;
7714 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7715 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7716 }
7717 return rc;
7718}
7719
7720
7721#ifdef SOME_UNUSED_FUNCTION
7722/**
7723 * Fetches a data dword and sign extends it to a qword.
7724 *
7725 * @returns Strict VBox status code.
7726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7727 * @param pu64Dst Where to return the sign extended value.
7728 * @param iSegReg The index of the segment register to use for
7729 * this access. The base and limits are checked.
7730 * @param GCPtrMem The address of the guest memory.
7731 */
7732VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7733{
7734 /* The lazy approach for now... */
7735 uint8_t bUnmapInfo;
7736 int32_t const *pi32Src;
7737 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7738 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7739 if (rc == VINF_SUCCESS)
7740 {
7741 *pu64Dst = *pi32Src;
7742 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7743 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7744 }
7745#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7746 else
7747 *pu64Dst = 0;
7748#endif
7749 return rc;
7750}
7751#endif
7752
7753
7754/**
7755 * Fetches a descriptor register (lgdt, lidt).
7756 *
7757 * @returns Strict VBox status code.
7758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7759 * @param pcbLimit Where to return the limit.
7760 * @param pGCPtrBase Where to return the base.
7761 * @param iSegReg The index of the segment register to use for
7762 * this access. The base and limits are checked.
7763 * @param GCPtrMem The address of the guest memory.
7764 * @param enmOpSize The effective operand size.
7765 */
7766VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7767 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7768{
7769 /*
7770 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7771 * little special:
7772 * - The two reads are done separately.
7773     *   - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
7774     *   - We suspect that the 386 actually commits the limit before the base in
7775 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7776     *     don't try to emulate this eccentric behavior, because it's not well
7777 * enough understood and rather hard to trigger.
7778 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7779 */
7780 VBOXSTRICTRC rcStrict;
7781 if (IEM_IS_64BIT_CODE(pVCpu))
7782 {
7783 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7784 if (rcStrict == VINF_SUCCESS)
7785 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7786 }
7787 else
7788 {
7789        uint32_t uTmp = 0; /* (Silences the Visual C++ may-be-used-uninitialized warning.) */
7790 if (enmOpSize == IEMMODE_32BIT)
7791 {
7792 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7793 {
7794 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7795 if (rcStrict == VINF_SUCCESS)
7796 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7797 }
7798 else
7799 {
7800 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7801 if (rcStrict == VINF_SUCCESS)
7802 {
7803 *pcbLimit = (uint16_t)uTmp;
7804 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7805 }
7806 }
7807 if (rcStrict == VINF_SUCCESS)
7808 *pGCPtrBase = uTmp;
7809 }
7810 else
7811 {
7812 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7813 if (rcStrict == VINF_SUCCESS)
7814 {
7815 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7816 if (rcStrict == VINF_SUCCESS)
7817 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7818 }
7819 }
7820 }
7821 return rcStrict;
7822}
7823
7824
7825/**
7826 * Stores a data dqword, SSE aligned.
7827 *
7828 * @returns Strict VBox status code.
7829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7830 * @param iSegReg The index of the segment register to use for
7831 * this access. The base and limits are checked.
7832 * @param GCPtrMem The address of the guest memory.
7833 * @param u128Value The value to store.
7834 */
7835VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7836{
7837 /* The lazy approach for now... */
7838 uint8_t bUnmapInfo;
7839 PRTUINT128U pu128Dst;
7840 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7841 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7842 if (rc == VINF_SUCCESS)
7843 {
7844 pu128Dst->au64[0] = u128Value.au64[0];
7845 pu128Dst->au64[1] = u128Value.au64[1];
7846 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7847 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7848 }
7849 return rc;
7850}
7851
7852
7853#ifdef IEM_WITH_SETJMP
7854/**
7855 * Stores a data dqword, SSE aligned.
7856 *
7857 * @returns Strict VBox status code.
7858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7859 * @param iSegReg The index of the segment register to use for
7860 * this access. The base and limits are checked.
7861 * @param GCPtrMem The address of the guest memory.
7862 * @param u128Value The value to store.
7863 */
7864void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7865 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7866{
7867 /* The lazy approach for now... */
7868 uint8_t bUnmapInfo;
7869 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7870 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7871 pu128Dst->au64[0] = u128Value.au64[0];
7872 pu128Dst->au64[1] = u128Value.au64[1];
7873 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7874 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7875}
7876#endif
7877
7878
7879/**
7880 * Stores a data qqword (256 bits).
7881 *
7882 * @returns Strict VBox status code.
7883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7884 * @param iSegReg The index of the segment register to use for
7885 * this access. The base and limits are checked.
7886 * @param GCPtrMem The address of the guest memory.
7887 * @param pu256Value Pointer to the value to store.
7888 */
7889VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7890{
7891 /* The lazy approach for now... */
7892 uint8_t bUnmapInfo;
7893 PRTUINT256U pu256Dst;
7894 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7895 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7896 if (rc == VINF_SUCCESS)
7897 {
7898 pu256Dst->au64[0] = pu256Value->au64[0];
7899 pu256Dst->au64[1] = pu256Value->au64[1];
7900 pu256Dst->au64[2] = pu256Value->au64[2];
7901 pu256Dst->au64[3] = pu256Value->au64[3];
7902 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7903 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7904 }
7905 return rc;
7906}
7907
7908
7909#ifdef IEM_WITH_SETJMP
7910/**
7911 * Stores a data qqword (256 bits), longjmp on error.
7912 *
7913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7914 * @param iSegReg The index of the segment register to use for
7915 * this access. The base and limits are checked.
7916 * @param GCPtrMem The address of the guest memory.
7917 * @param pu256Value Pointer to the value to store.
7918 */
7919void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7920{
7921 /* The lazy approach for now... */
7922 uint8_t bUnmapInfo;
7923 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7924 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7925 pu256Dst->au64[0] = pu256Value->au64[0];
7926 pu256Dst->au64[1] = pu256Value->au64[1];
7927 pu256Dst->au64[2] = pu256Value->au64[2];
7928 pu256Dst->au64[3] = pu256Value->au64[3];
7929 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7930 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7931}
7932#endif
7933
7934
7935/**
7936 * Stores a descriptor register (sgdt, sidt).
7937 *
7938 * @returns Strict VBox status code.
7939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7940 * @param cbLimit The limit.
7941 * @param GCPtrBase The base address.
7942 * @param iSegReg The index of the segment register to use for
7943 * this access. The base and limits are checked.
7944 * @param GCPtrMem The address of the guest memory.
7945 */
7946VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7947{
7948 /*
7949     * The SIDT and SGDT instructions actually store the data using two
7950 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7951     * do not respond to opsize prefixes.
7952 */
7953 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7954 if (rcStrict == VINF_SUCCESS)
7955 {
7956 if (IEM_IS_16BIT_CODE(pVCpu))
7957 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7958 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7959 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7960 else if (IEM_IS_32BIT_CODE(pVCpu))
7961 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7962 else
7963 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7964 }
7965 return rcStrict;
7966}
7967
7968
7969/**
7970 * Begin a special stack push (used by interrupts, exceptions and such).
7971 *
7972 * This will raise \#SS or \#PF if appropriate.
7973 *
7974 * @returns Strict VBox status code.
7975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7976 * @param cbMem The number of bytes to push onto the stack.
7977 * @param cbAlign The alignment mask (7, 3, 1).
7978 * @param ppvMem Where to return the pointer to the stack memory.
7979 * As with the other memory functions this could be
7980 * direct access or bounce buffered access, so
7981 *                      don't commit any registers until the commit call
7982 * succeeds.
7983 * @param pbUnmapInfo Where to store unmap info for
7984 * iemMemStackPushCommitSpecial.
7985 * @param puNewRsp Where to return the new RSP value. This must be
7986 * passed unchanged to
7987 * iemMemStackPushCommitSpecial().
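 *
 * A minimal usage sketch (hypothetical caller; assumes an 8-byte push with the
 * matching alignment mask of 7, and uValueToPush is a made-up local):
 * @code
 *      void    *pvStackMem;
 *      uint8_t  bUnmapInfo;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvStackMem, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *(uint64_t *)pvStackMem = uValueToPush;                              // hypothetical value being pushed
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); // updates RSP on success
 *      }
 * @endcode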
7988 */
7989VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7990 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7991{
7992 Assert(cbMem < UINT8_MAX);
7993 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7994 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7995}
7996
7997
7998/**
7999 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8000 *
8001 * This will update the rSP.
8002 *
8003 * @returns Strict VBox status code.
8004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8005 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
8006 * @param uNewRsp The new RSP value returned by
8007 * iemMemStackPushBeginSpecial().
8008 */
8009VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
8010{
8011 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8012 if (rcStrict == VINF_SUCCESS)
8013 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8014 return rcStrict;
8015}
8016
8017
8018/**
8019 * Begin a special stack pop (used by iret, retf and such).
8020 *
8021 * This will raise \#SS or \#PF if appropriate.
8022 *
8023 * @returns Strict VBox status code.
8024 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8025 * @param cbMem The number of bytes to pop from the stack.
8026 * @param cbAlign The alignment mask (7, 3, 1).
8027 * @param ppvMem Where to return the pointer to the stack memory.
8028 * @param pbUnmapInfo Where to store unmap info for
8029 * iemMemStackPopDoneSpecial.
8030 * @param puNewRsp Where to return the new RSP value. This must be
8031 * assigned to CPUMCTX::rsp manually some time
8032 * after iemMemStackPopDoneSpecial() has been
8033 * called.
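 *
 * A minimal usage sketch (hypothetical caller; assumes an 8-byte pop with the
 * matching alignment mask of 7):
 * @code
 *      void const *pvStackMem;
 *      uint8_t     bUnmapInfo;
 *      uint64_t    uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, &pvStackMem, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint64_t const uValue = *(uint64_t const *)pvStackMem;  // consume before unmapping
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp;                   // committed manually, as noted above
 *      }
 * @endcode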
8034 */
8035VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8036 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
8037{
8038 Assert(cbMem < UINT8_MAX);
8039 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8040 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8041}
8042
8043
8044/**
8045 * Continue a special stack pop (used by iret and retf), for the purpose of
8046 * retrieving a new stack pointer.
8047 *
8048 * This will raise \#SS or \#PF if appropriate.
8049 *
8050 * @returns Strict VBox status code.
8051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8052 * @param off Offset from the top of the stack. This is zero
8053 * except in the retf case.
8054 * @param cbMem The number of bytes to pop from the stack.
8055 * @param ppvMem Where to return the pointer to the stack memory.
8056 * @param pbUnmapInfo Where to store unmap info for
8057 * iemMemStackPopDoneSpecial.
8058 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8059 * return this because all use of this function is
8060 * to retrieve a new value and anything we return
8061 * here would be discarded.)
8062 */
8063VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8064 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
8065{
8066 Assert(cbMem < UINT8_MAX);
8067
8068    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8069 RTGCPTR GCPtrTop;
8070 if (IEM_IS_64BIT_CODE(pVCpu))
8071 GCPtrTop = uCurNewRsp;
8072 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8073 GCPtrTop = (uint32_t)uCurNewRsp;
8074 else
8075 GCPtrTop = (uint16_t)uCurNewRsp;
8076
8077 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8078 0 /* checked in iemMemStackPopBeginSpecial */);
8079}
8080
8081
8082/**
8083 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8084 * iemMemStackPopContinueSpecial).
8085 *
8086 * The caller will manually commit the rSP.
8087 *
8088 * @returns Strict VBox status code.
8089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8090 * @param bUnmapInfo Unmap information returned by
8091 * iemMemStackPopBeginSpecial() or
8092 * iemMemStackPopContinueSpecial().
8093 */
8094VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
8095{
8096 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8097}
8098
8099
8100/**
8101 * Fetches a system table byte.
8102 *
8103 * @returns Strict VBox status code.
8104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8105 * @param pbDst Where to return the byte.
8106 * @param iSegReg The index of the segment register to use for
8107 * this access. The base and limits are checked.
8108 * @param GCPtrMem The address of the guest memory.
8109 */
8110VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8111{
8112 /* The lazy approach for now... */
8113 uint8_t bUnmapInfo;
8114 uint8_t const *pbSrc;
8115 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8116 if (rc == VINF_SUCCESS)
8117 {
8118 *pbDst = *pbSrc;
8119 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8120 }
8121 return rc;
8122}
8123
8124
8125/**
8126 * Fetches a system table word.
8127 *
8128 * @returns Strict VBox status code.
8129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8130 * @param pu16Dst Where to return the word.
8131 * @param iSegReg The index of the segment register to use for
8132 * this access. The base and limits are checked.
8133 * @param GCPtrMem The address of the guest memory.
8134 */
8135VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8136{
8137 /* The lazy approach for now... */
8138 uint8_t bUnmapInfo;
8139 uint16_t const *pu16Src;
8140 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8141 if (rc == VINF_SUCCESS)
8142 {
8143 *pu16Dst = *pu16Src;
8144 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8145 }
8146 return rc;
8147}
8148
8149
8150/**
8151 * Fetches a system table dword.
8152 *
8153 * @returns Strict VBox status code.
8154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8155 * @param pu32Dst Where to return the dword.
8156 * @param iSegReg The index of the segment register to use for
8157 * this access. The base and limits are checked.
8158 * @param GCPtrMem The address of the guest memory.
8159 */
8160VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8161{
8162 /* The lazy approach for now... */
8163 uint8_t bUnmapInfo;
8164 uint32_t const *pu32Src;
8165 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8166 if (rc == VINF_SUCCESS)
8167 {
8168 *pu32Dst = *pu32Src;
8169 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8170 }
8171 return rc;
8172}
8173
8174
8175/**
8176 * Fetches a system table qword.
8177 *
8178 * @returns Strict VBox status code.
8179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8180 * @param pu64Dst Where to return the qword.
8181 * @param iSegReg The index of the segment register to use for
8182 * this access. The base and limits are checked.
8183 * @param GCPtrMem The address of the guest memory.
8184 */
8185VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8186{
8187 /* The lazy approach for now... */
8188 uint8_t bUnmapInfo;
8189 uint64_t const *pu64Src;
8190 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8191 if (rc == VINF_SUCCESS)
8192 {
8193 *pu64Dst = *pu64Src;
8194 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8195 }
8196 return rc;
8197}
8198
8199
8200/**
8201 * Fetches a descriptor table entry with caller specified error code.
8202 *
8203 * @returns Strict VBox status code.
8204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8205 * @param pDesc Where to return the descriptor table entry.
8206 * @param uSel The selector which table entry to fetch.
8207 * @param uXcpt The exception to raise on table lookup error.
8208 * @param uErrorCode The error code associated with the exception.
8209 */
8210static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8211 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8212{
8213 AssertPtr(pDesc);
8214 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8215
8216 /** @todo did the 286 require all 8 bytes to be accessible? */
8217 /*
8218 * Get the selector table base and check bounds.
8219 */
8220 RTGCPTR GCPtrBase;
8221 if (uSel & X86_SEL_LDT)
8222 {
8223 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8224 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8225 {
8226 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8227 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8228 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8229 uErrorCode, 0);
8230 }
8231
8232 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8233 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8234 }
8235 else
8236 {
8237 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8238 {
8239 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8240 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8241 uErrorCode, 0);
8242 }
8243 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8244 }
8245
8246 /*
8247 * Read the legacy descriptor and maybe the long mode extensions if
8248 * required.
8249 */
8250 VBOXSTRICTRC rcStrict;
8251 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8252 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8253 else
8254 {
8255 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8256 if (rcStrict == VINF_SUCCESS)
8257 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8258 if (rcStrict == VINF_SUCCESS)
8259 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8260 if (rcStrict == VINF_SUCCESS)
8261 pDesc->Legacy.au16[3] = 0;
8262 else
8263 return rcStrict;
8264 }
8265
8266 if (rcStrict == VINF_SUCCESS)
8267 {
8268 if ( !IEM_IS_LONG_MODE(pVCpu)
8269 || pDesc->Legacy.Gen.u1DescType)
8270 pDesc->Long.au64[1] = 0;
8271 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8272 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8273 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
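            /* Note: (uSel | X86_SEL_RPL_LDT) + 1 equals (uSel & X86_SEL_MASK) + 8, i.e.
                     the address of the upper 8 bytes of the 16-byte long mode descriptor. */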
8274 else
8275 {
8276 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8277 /** @todo is this the right exception? */
8278 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8279 }
8280 }
8281 return rcStrict;
8282}
8283
8284
8285/**
8286 * Fetches a descriptor table entry.
8287 *
8288 * @returns Strict VBox status code.
8289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8290 * @param pDesc Where to return the descriptor table entry.
8291 * @param uSel The selector which table entry to fetch.
8292 * @param uXcpt The exception to raise on table lookup error.
8293 */
8294VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8295{
8296 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8297}
8298
8299
8300/**
8301 * Marks the selector descriptor as accessed (only non-system descriptors).
8302 *
8303 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8304 * will therefore skip the limit checks.
8305 *
8306 * @returns Strict VBox status code.
8307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8308 * @param uSel The selector.
8309 */
8310VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8311{
8312 /*
8313 * Get the selector table base and calculate the entry address.
8314 */
8315 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8316 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8317 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8318 GCPtr += uSel & X86_SEL_MASK;
8319
8320 /*
8321 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8322 * ugly stuff to avoid this. This will make sure it's an atomic access
8323     * as well as more or less remove any question about 8-bit or 32-bit accesses.
8324 */
8325 VBOXSTRICTRC rcStrict;
8326 uint8_t bUnmapInfo;
8327 uint32_t volatile *pu32;
8328 if ((GCPtr & 3) == 0)
8329 {
8330        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8331 GCPtr += 2 + 2;
8332 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8333 if (rcStrict != VINF_SUCCESS)
8334 return rcStrict;
8335        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8336 }
8337 else
8338 {
8339 /* The misaligned GDT/LDT case, map the whole thing. */
8340 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8341 if (rcStrict != VINF_SUCCESS)
8342 return rcStrict;
8343 switch ((uintptr_t)pu32 & 3)
8344 {
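            /* All four cases set descriptor byte 5, bit 0 (the accessed bit): case 0 uses
               bit 40 from the already aligned base, while cases 1-3 advance the byte pointer
               to the next dword boundary and shrink the bit index accordingly. */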
8345 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8346 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8347 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8348 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8349 }
8350 }
8351
8352 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8353}
8354
8355
8356#undef LOG_GROUP
8357#define LOG_GROUP LOG_GROUP_IEM
8358
8359/** @} */
8360
8361/** @name Opcode Helpers.
8362 * @{
8363 */
8364
8365/**
8366 * Calculates the effective address of a ModR/M memory operand.
8367 *
8368 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8369 *
8370 * @return Strict VBox status code.
8371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8372 * @param bRm The ModRM byte.
8373 * @param cbImmAndRspOffset - First byte: The size of any immediate
8374 * following the effective address opcode bytes
8375 * (only for RIP relative addressing).
8376 * - Second byte: RSP displacement (for POP [ESP]).
8377 * @param pGCPtrEff Where to return the effective address.
8378 */
8379VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8380{
8381 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8382# define SET_SS_DEF() \
8383 do \
8384 { \
8385 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8386 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8387 } while (0)
8388
8389 if (!IEM_IS_64BIT_CODE(pVCpu))
8390 {
8391/** @todo Check the effective address size crap! */
8392 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8393 {
8394 uint16_t u16EffAddr;
8395
8396 /* Handle the disp16 form with no registers first. */
8397 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8398 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8399 else
8400 {
8401                /* Get the displacement. */
8402 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8403 {
8404 case 0: u16EffAddr = 0; break;
8405 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8406 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8407 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8408 }
8409
8410 /* Add the base and index registers to the disp. */
8411 switch (bRm & X86_MODRM_RM_MASK)
8412 {
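                    /* The classic 16-bit forms: [BX+SI], [BX+DI], [BP+SI], [BP+DI],
                       [SI], [DI], [BP] and [BX]; the BP-based forms default to SS. */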
8413 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8414 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8415 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8416 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8417 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8418 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8419 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8420 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8421 }
8422 }
8423
8424 *pGCPtrEff = u16EffAddr;
8425 }
8426 else
8427 {
8428 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8429 uint32_t u32EffAddr;
8430
8431 /* Handle the disp32 form with no registers first. */
8432 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8433 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8434 else
8435 {
8436 /* Get the register (or SIB) value. */
8437 switch ((bRm & X86_MODRM_RM_MASK))
8438 {
8439 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8440 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8441 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8442 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8443 case 4: /* SIB */
8444 {
8445 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8446
8447 /* Get the index and scale it. */
8448 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8449 {
8450 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8451 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8452 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8453 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8454 case 4: u32EffAddr = 0; /*none */ break;
8455 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8456 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8457 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8458 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8459 }
8460 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8461
8462 /* add base */
8463 switch (bSib & X86_SIB_BASE_MASK)
8464 {
8465 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8466 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8467 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8468 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8469 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8470 case 5:
8471 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8472 {
8473 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8474 SET_SS_DEF();
8475 }
8476 else
8477 {
8478 uint32_t u32Disp;
8479 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8480 u32EffAddr += u32Disp;
8481 }
8482 break;
8483 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8484 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8485 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8486 }
8487 break;
8488 }
8489 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8490 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8491 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8492 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8493 }
8494
8495 /* Get and add the displacement. */
8496 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8497 {
8498 case 0:
8499 break;
8500 case 1:
8501 {
8502 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8503 u32EffAddr += i8Disp;
8504 break;
8505 }
8506 case 2:
8507 {
8508 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8509 u32EffAddr += u32Disp;
8510 break;
8511 }
8512 default:
8513 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8514 }
8515
8516 }
8517 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8518 *pGCPtrEff = u32EffAddr;
8519 }
8520 }
8521 else
8522 {
8523 uint64_t u64EffAddr;
8524
8525 /* Handle the rip+disp32 form with no registers first. */
8526 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8527 {
8528 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8529 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8530 }
8531 else
8532 {
8533 /* Get the register (or SIB) value. */
8534 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8535 {
8536 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8537 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8538 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8539 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8540 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8541 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8542 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8543 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8544 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8545 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8546 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8547 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8548 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8549 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8550 /* SIB */
8551 case 4:
8552 case 12:
8553 {
8554 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8555
8556 /* Get the index and scale it. */
8557 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8558 {
8559 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8560 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8561 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8562 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8563 case 4: u64EffAddr = 0; /*none */ break;
8564 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8565 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8566 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8567 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8568 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8569 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8570 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8571 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8572 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8573 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8574 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8575 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8576 }
8577 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8578
8579 /* add base */
8580 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8581 {
8582 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8583 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8584 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8585 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8586 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8587 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8588 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8589 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8590 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8591 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8592 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8593 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8594 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8595 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8596 /* complicated encodings */
8597 case 5:
8598 case 13:
8599 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8600 {
8601 if (!pVCpu->iem.s.uRexB)
8602 {
8603 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8604 SET_SS_DEF();
8605 }
8606 else
8607 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8608 }
8609 else
8610 {
8611 uint32_t u32Disp;
8612 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8613 u64EffAddr += (int32_t)u32Disp;
8614 }
8615 break;
8616 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8617 }
8618 break;
8619 }
8620 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8621 }
8622
8623 /* Get and add the displacement. */
8624 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8625 {
8626 case 0:
8627 break;
8628 case 1:
8629 {
8630 int8_t i8Disp;
8631 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8632 u64EffAddr += i8Disp;
8633 break;
8634 }
8635 case 2:
8636 {
8637 uint32_t u32Disp;
8638 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8639 u64EffAddr += (int32_t)u32Disp;
8640 break;
8641 }
8642 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8643 }
8644
8645 }
8646
8647 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8648 *pGCPtrEff = u64EffAddr;
8649 else
8650 {
8651 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8652 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8653 }
8654 }
8655
8656 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8657 return VINF_SUCCESS;
8658}
8659
8660
8661#ifdef IEM_WITH_SETJMP
8662/**
8663 * Calculates the effective address of a ModR/M memory operand.
8664 *
8665 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8666 *
8667 * May longjmp on internal error.
8668 *
8669 * @return The effective address.
8670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8671 * @param bRm The ModRM byte.
8672 * @param cbImmAndRspOffset - First byte: The size of any immediate
8673 * following the effective address opcode bytes
8674 * (only for RIP relative addressing).
8675 * - Second byte: RSP displacement (for POP [ESP]).
8676 */
8677RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8678{
8679 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8680# define SET_SS_DEF() \
8681 do \
8682 { \
8683 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8684 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8685 } while (0)
8686
8687 if (!IEM_IS_64BIT_CODE(pVCpu))
8688 {
8689/** @todo Check the effective address size crap! */
8690 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8691 {
8692 uint16_t u16EffAddr;
8693
8694 /* Handle the disp16 form with no registers first. */
8695 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8696 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8697 else
8698 {
8699                /* Get the displacement. */
8700 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8701 {
8702 case 0: u16EffAddr = 0; break;
8703 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8704 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8705 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8706 }
8707
8708 /* Add the base and index registers to the disp. */
8709 switch (bRm & X86_MODRM_RM_MASK)
8710 {
8711 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8712 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8713 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8714 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8715 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8716 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8717 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8718 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8719 }
8720 }
8721
8722 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8723 return u16EffAddr;
8724 }
8725
8726 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8727 uint32_t u32EffAddr;
8728
8729 /* Handle the disp32 form with no registers first. */
8730 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8731 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8732 else
8733 {
8734 /* Get the register (or SIB) value. */
8735 switch ((bRm & X86_MODRM_RM_MASK))
8736 {
8737 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8738 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8739 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8740 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8741 case 4: /* SIB */
8742 {
8743 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8744
8745 /* Get the index and scale it. */
8746 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8747 {
8748 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8749 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8750 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8751 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8752 case 4: u32EffAddr = 0; /*none */ break;
8753 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8754 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8755 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8756 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8757 }
8758 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8759
8760 /* add base */
8761 switch (bSib & X86_SIB_BASE_MASK)
8762 {
8763 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8764 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8765 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8766 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8767 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8768 case 5:
8769 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8770 {
8771 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8772 SET_SS_DEF();
8773 }
8774 else
8775 {
8776 uint32_t u32Disp;
8777 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8778 u32EffAddr += u32Disp;
8779 }
8780 break;
8781 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8782 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8783 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8784 }
8785 break;
8786 }
8787 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8788 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8789 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8790 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8791 }
8792
8793 /* Get and add the displacement. */
8794 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8795 {
8796 case 0:
8797 break;
8798 case 1:
8799 {
8800 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8801 u32EffAddr += i8Disp;
8802 break;
8803 }
8804 case 2:
8805 {
8806 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8807 u32EffAddr += u32Disp;
8808 break;
8809 }
8810 default:
8811 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8812 }
8813 }
8814
8815 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8816 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8817 return u32EffAddr;
8818 }
8819
8820 uint64_t u64EffAddr;
8821
8822 /* Handle the rip+disp32 form with no registers first. */
8823 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8824 {
8825 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8826 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8827 }
8828 else
8829 {
8830 /* Get the register (or SIB) value. */
8831 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8832 {
8833 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8834 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8835 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8836 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8837 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8838 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8839 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8840 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8841 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8842 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8843 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8844 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8845 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8846 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8847 /* SIB */
8848 case 4:
8849 case 12:
8850 {
8851 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8852
8853 /* Get the index and scale it. */
8854 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8855 {
8856 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8857 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8858 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8859 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8860 case 4: u64EffAddr = 0; /*none */ break;
8861 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8862 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8863 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8864 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8865 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8866 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8867 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8868 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8869 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8870 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8871 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8872 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8873 }
8874 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8875
8876 /* add base */
8877 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8878 {
8879 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8880 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8881 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8882 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8883 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8884 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8885 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8886 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8887 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8888 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8889 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8890 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8891 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8892 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8893 /* complicated encodings */
8894 case 5:
8895 case 13:
8896 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8897 {
8898 if (!pVCpu->iem.s.uRexB)
8899 {
8900 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8901 SET_SS_DEF();
8902 }
8903 else
8904 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8905 }
8906 else
8907 {
8908 uint32_t u32Disp;
8909 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8910 u64EffAddr += (int32_t)u32Disp;
8911 }
8912 break;
8913 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8914 }
8915 break;
8916 }
8917 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8918 }
8919
8920 /* Get and add the displacement. */
8921 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8922 {
8923 case 0:
8924 break;
8925 case 1:
8926 {
8927 int8_t i8Disp;
8928 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8929 u64EffAddr += i8Disp;
8930 break;
8931 }
8932 case 2:
8933 {
8934 uint32_t u32Disp;
8935 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8936 u64EffAddr += (int32_t)u32Disp;
8937 break;
8938 }
8939 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8940 }
8941
8942 }
8943
8944 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8945 {
8946 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8947 return u64EffAddr;
8948 }
8949 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8950 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8951 return u64EffAddr & UINT32_MAX;
8952}
8953#endif /* IEM_WITH_SETJMP */
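/*
 * Illustrative sketch (not part of the build, normally reached via the
 * IEM_MC_CALC_RM_EFF_ADDR macro): how a hypothetical decoder stub might pack
 * the cbImmAndRspOffset argument for the helper above.  The immediate size (4)
 * and the pop size below are assumptions for the example only.
 */
#if 0
/* Effective address followed by a 4-byte immediate (only matters for the
   RIP-relative form): first byte = immediate size, second byte = 0. */
RTGCPTR GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 4 /*cbImm*/);

/* POP [ESP]-style decode: no immediate, but bias the RSP/ESP base by the
   operand size already popped (added to xSP in the SIB base handling). */
uint8_t const cbPop = 4; /* example: 32-bit POP */
RTGCPTR GCPtrEff2 = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0 /*cbImm*/ | ((uint32_t)cbPop << 8));
#endif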
8954
8955
8956/**
8957 * Calculates the effective address of a ModR/M memory operand, extended version
8958 * for use in the recompilers.
8959 *
8960 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8961 *
8962 * @return Strict VBox status code.
8963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8964 * @param bRm The ModRM byte.
8965 * @param cbImmAndRspOffset - First byte: The size of any immediate
8966 * following the effective address opcode bytes
8967 * (only for RIP relative addressing).
8968 * - Second byte: RSP displacement (for POP [ESP]).
8969 * @param pGCPtrEff Where to return the effective address.
8970 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8971 * SIB byte (bits 39:32).
8972 */
8973VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8974{
8975 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8976# define SET_SS_DEF() \
8977 do \
8978 { \
8979 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8980 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8981 } while (0)
8982
8983 uint64_t uInfo;
8984 if (!IEM_IS_64BIT_CODE(pVCpu))
8985 {
8986/** @todo Check the effective address size crap! */
8987 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8988 {
8989 uint16_t u16EffAddr;
8990
8991 /* Handle the disp16 form with no registers first. */
8992 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8993 {
8994 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8995 uInfo = u16EffAddr;
8996 }
8997 else
8998 {
8999 /* Get the displacement. */
9000 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9001 {
9002 case 0: u16EffAddr = 0; break;
9003 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9004 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9005 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9006 }
9007 uInfo = u16EffAddr;
9008
9009 /* Add the base and index registers to the disp. */
9010 switch (bRm & X86_MODRM_RM_MASK)
9011 {
9012 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9013 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9014 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9015 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9016 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9017 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9018 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9019 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9020 }
9021 }
9022
9023 *pGCPtrEff = u16EffAddr;
9024 }
9025 else
9026 {
9027 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9028 uint32_t u32EffAddr;
9029
9030 /* Handle the disp32 form with no registers first. */
9031 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9032 {
9033 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9034 uInfo = u32EffAddr;
9035 }
9036 else
9037 {
9038 /* Get the register (or SIB) value. */
9039 uInfo = 0;
9040 switch ((bRm & X86_MODRM_RM_MASK))
9041 {
9042 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9043 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9044 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9045 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9046 case 4: /* SIB */
9047 {
9048 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9049 uInfo = (uint64_t)bSib << 32;
9050
9051 /* Get the index and scale it. */
9052 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9053 {
9054 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9055 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9056 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9057 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9058 case 4: u32EffAddr = 0; /*none */ break;
9059 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9060 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9061 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9062 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9063 }
9064 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9065
9066 /* add base */
9067 switch (bSib & X86_SIB_BASE_MASK)
9068 {
9069 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9070 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9071 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9072 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9073 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9074 case 5:
9075 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9076 {
9077 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9078 SET_SS_DEF();
9079 }
9080 else
9081 {
9082 uint32_t u32Disp;
9083 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9084 u32EffAddr += u32Disp;
9085 uInfo |= u32Disp;
9086 }
9087 break;
9088 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9089 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9090 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9091 }
9092 break;
9093 }
9094 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9095 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9096 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9097 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9098 }
9099
9100 /* Get and add the displacement. */
9101 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9102 {
9103 case 0:
9104 break;
9105 case 1:
9106 {
9107 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9108 u32EffAddr += i8Disp;
9109 uInfo |= (uint32_t)(int32_t)i8Disp;
9110 break;
9111 }
9112 case 2:
9113 {
9114 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9115 u32EffAddr += u32Disp;
9116 uInfo |= (uint32_t)u32Disp;
9117 break;
9118 }
9119 default:
9120 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9121 }
9122
9123 }
9124 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9125 *pGCPtrEff = u32EffAddr;
9126 }
9127 }
9128 else
9129 {
9130 uint64_t u64EffAddr;
9131
9132 /* Handle the rip+disp32 form with no registers first. */
9133 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9134 {
9135 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9136 uInfo = (uint32_t)u64EffAddr;
9137 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9138 }
9139 else
9140 {
9141 /* Get the register (or SIB) value. */
9142 uInfo = 0;
9143 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9144 {
9145 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9146 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9147 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9148 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9149 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9150 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9151 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9152 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9153 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9154 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9155 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9156 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9157 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9158 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9159 /* SIB */
9160 case 4:
9161 case 12:
9162 {
9163 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9164 uInfo = (uint64_t)bSib << 32;
9165
9166 /* Get the index and scale it. */
9167 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9168 {
9169 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9170 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9171 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9172 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9173 case 4: u64EffAddr = 0; /*none */ break;
9174 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9175 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9176 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9177 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9178 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9179 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9180 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9181 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9182 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9183 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9184 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9185 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9186 }
9187 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9188
9189 /* add base */
9190 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9191 {
9192 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9193 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9194 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9195 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9196 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9197 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9198 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9199 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9200 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9201 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9202 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9203 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9204 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9205 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9206 /* complicated encodings */
9207 case 5:
9208 case 13:
9209 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9210 {
9211 if (!pVCpu->iem.s.uRexB)
9212 {
9213 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9214 SET_SS_DEF();
9215 }
9216 else
9217 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9218 }
9219 else
9220 {
9221 uint32_t u32Disp;
9222 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9223 u64EffAddr += (int32_t)u32Disp;
9224 uInfo |= u32Disp;
9225 }
9226 break;
9227 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9228 }
9229 break;
9230 }
9231 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9232 }
9233
9234 /* Get and add the displacement. */
9235 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9236 {
9237 case 0:
9238 break;
9239 case 1:
9240 {
9241 int8_t i8Disp;
9242 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9243 u64EffAddr += i8Disp;
9244 uInfo |= (uint32_t)(int32_t)i8Disp;
9245 break;
9246 }
9247 case 2:
9248 {
9249 uint32_t u32Disp;
9250 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9251 u64EffAddr += (int32_t)u32Disp;
9252 uInfo |= u32Disp;
9253 break;
9254 }
9255 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9256 }
9257
9258 }
9259
9260 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9261 *pGCPtrEff = u64EffAddr;
9262 else
9263 {
9264 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9265 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9266 }
9267 }
9268 *puInfo = uInfo;
9269
9270 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9271 return VINF_SUCCESS;
9272}
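/*
 * Illustrative sketch (not part of the build): consuming the extended info the
 * function above returns for the recompilers.  The variable names and the zero
 * cbImmAndRspOffset are assumptions for the example only.
 */
#if 0
RTGCPTR      GCPtrEff;
uint64_t     uInfo;
VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0 /*cbImmAndRspOffset*/, &GCPtrEff, &uInfo);
if (rcStrict == VINF_SUCCESS)
{
    uint32_t const u32Disp = (uint32_t)uInfo;        /* bits 31:0  - the 32-bit displacement */
    uint8_t  const bSib    = (uint8_t)(uInfo >> 32); /* bits 39:32 - the SIB byte, if present */
    /* ... hand GCPtrEff, u32Disp and bSib to the recompiler ... */
}
#endif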
9273
9274/** @} */
9275
9276
9277#ifdef LOG_ENABLED
9278/**
9279 * Logs the current instruction.
9280 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9281 * @param fSameCtx Set if we have the same context information as the VMM,
9282 * clear if we may have already executed an instruction in
9283 * our debug context. When clear, we assume IEMCPU holds
9284 * valid CPU mode info.
9285 *
9286 * The @a fSameCtx parameter is now misleading and obsolete.
9287 * @param pszFunction The IEM function doing the execution.
9288 */
9289static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9290{
9291# ifdef IN_RING3
9292 if (LogIs2Enabled())
9293 {
9294 char szInstr[256];
9295 uint32_t cbInstr = 0;
9296 if (fSameCtx)
9297 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9298 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9299 szInstr, sizeof(szInstr), &cbInstr);
9300 else
9301 {
9302 uint32_t fFlags = 0;
9303 switch (IEM_GET_CPU_MODE(pVCpu))
9304 {
9305 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9306 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9307 case IEMMODE_16BIT:
9308 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9309 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9310 else
9311 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9312 break;
9313 }
9314 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9315 szInstr, sizeof(szInstr), &cbInstr);
9316 }
9317
9318 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9319 Log2(("**** %s fExec=%x\n"
9320 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9321 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9322 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9323 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9324 " %s\n"
9325 , pszFunction, pVCpu->iem.s.fExec,
9326 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9327 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9328 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9329 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9330 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9331 szInstr));
9332
9333 /* This stuff sucks atm. as it fills the log with MSRs. */
9334 //if (LogIs3Enabled())
9335 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9336 }
9337 else
9338# endif
9339 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9340 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9341 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9342}
9343#endif /* LOG_ENABLED */
9344
9345
9346#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9347/**
9348 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9349 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9350 *
9351 * @returns Modified rcStrict.
9352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9353 * @param rcStrict The instruction execution status.
9354 */
9355static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9356{
9357 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9358 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9359 {
9360 /* VMX preemption timer takes priority over NMI-window exits. */
9361 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9362 {
9363 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9364 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9365 }
9366 /*
9367 * Check remaining intercepts.
9368 *
9369 * NMI-window and Interrupt-window VM-exits.
9370 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9371 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9372 *
9373 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9374 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9375 */
9376 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9377 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9378 && !TRPMHasTrap(pVCpu))
9379 {
9380 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9381 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9382 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9383 {
9384 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9385 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9386 }
9387 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9388 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9389 {
9390 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9391 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9392 }
9393 }
9394 }
9395 /* TPR-below threshold/APIC write has the highest priority. */
9396 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9397 {
9398 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9399 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9400 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9401 }
9402 /* MTF takes priority over VMX-preemption timer. */
9403 else
9404 {
9405 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9406 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9407 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9408 }
9409 return rcStrict;
9410}
9411#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9412
9413
9414/**
9415 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9416 * IEMExecOneWithPrefetchedByPC.
9417 *
9418 * Similar code is found in IEMExecLots.
9419 *
9420 * @return Strict VBox status code.
9421 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9422 * @param fExecuteInhibit If set, execute the instruction following CLI,
9423 * POP SS and MOV SS,GR.
9424 * @param pszFunction The calling function name.
9425 */
9426DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9427{
9428 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9429 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9430 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9431 RT_NOREF_PV(pszFunction);
9432
9433#ifdef IEM_WITH_SETJMP
9434 VBOXSTRICTRC rcStrict;
9435 IEM_TRY_SETJMP(pVCpu, rcStrict)
9436 {
9437 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9438 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9439 }
9440 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9441 {
9442 pVCpu->iem.s.cLongJumps++;
9443 }
9444 IEM_CATCH_LONGJMP_END(pVCpu);
9445#else
9446 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9447 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9448#endif
9449 if (rcStrict == VINF_SUCCESS)
9450 pVCpu->iem.s.cInstructions++;
9451 if (pVCpu->iem.s.cActiveMappings > 0)
9452 {
9453 Assert(rcStrict != VINF_SUCCESS);
9454 iemMemRollback(pVCpu);
9455 }
9456 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9457 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9458 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9459
9460//#ifdef DEBUG
9461// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9462//#endif
9463
9464#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9465 /*
9466 * Perform any VMX nested-guest instruction boundary actions.
9467 *
9468 * If any of these causes a VM-exit, we must skip executing the next
9469 * instruction (would run into stale page tables). A VM-exit makes sure
9470 * there is no interrupt-inhibition, so that should ensure we don't go
9471 * to try execute the next instruction. Clearing fExecuteInhibit is
9472 * problematic because of the setjmp/longjmp clobbering above.
9473 */
9474 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9475 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9476 || rcStrict != VINF_SUCCESS)
9477 { /* likely */ }
9478 else
9479 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9480#endif
9481
9482 /* Execute the next instruction as well if a cli, pop ss or
9483 mov ss, Gr has just completed successfully. */
9484 if ( fExecuteInhibit
9485 && rcStrict == VINF_SUCCESS
9486 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9487 {
9488 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9489 if (rcStrict == VINF_SUCCESS)
9490 {
9491#ifdef LOG_ENABLED
9492 iemLogCurInstr(pVCpu, false, pszFunction);
9493#endif
9494#ifdef IEM_WITH_SETJMP
9495 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9496 {
9497 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9498 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9499 }
9500 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9501 {
9502 pVCpu->iem.s.cLongJumps++;
9503 }
9504 IEM_CATCH_LONGJMP_END(pVCpu);
9505#else
9506 IEM_OPCODE_GET_FIRST_U8(&b);
9507 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9508#endif
9509 if (rcStrict == VINF_SUCCESS)
9510 {
9511 pVCpu->iem.s.cInstructions++;
9512#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9513 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9514 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9515 { /* likely */ }
9516 else
9517 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9518#endif
9519 }
9520 if (pVCpu->iem.s.cActiveMappings > 0)
9521 {
9522 Assert(rcStrict != VINF_SUCCESS);
9523 iemMemRollback(pVCpu);
9524 }
9525 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9526 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9527 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9528 }
9529 else if (pVCpu->iem.s.cActiveMappings > 0)
9530 iemMemRollback(pVCpu);
9531 /** @todo drop this after we bake this change into RIP advancing. */
9532 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9533 }
9534
9535 /*
9536 * Return value fiddling, statistics and sanity assertions.
9537 */
9538 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9539
9540 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9541 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9542 return rcStrict;
9543}
9544
9545
9546/**
9547 * Execute one instruction.
9548 *
9549 * @return Strict VBox status code.
9550 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9551 */
9552VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9553{
9554 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9555#ifdef LOG_ENABLED
9556 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9557#endif
9558
9559 /*
9560 * Do the decoding and emulation.
9561 */
9562 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9563 if (rcStrict == VINF_SUCCESS)
9564 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9565 else if (pVCpu->iem.s.cActiveMappings > 0)
9566 iemMemRollback(pVCpu);
9567
9568 if (rcStrict != VINF_SUCCESS)
9569 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9570 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9571 return rcStrict;
9572}
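/*
 * Illustrative sketch (not part of the build): a minimal caller of IEMExecOne,
 * roughly what an EM-style loop might do with the status code.  The handling
 * shown is an assumption for the example, not the actual EM logic.
 */
#if 0
VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
if (rcStrict == VINF_SUCCESS)
{ /* instruction executed, guest state advanced */ }
else
{ /* informational/error statuses are handed back to the outer execution loop */ }
#endif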
9573
9574
9575VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9576{
9577 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9578 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9579 if (rcStrict == VINF_SUCCESS)
9580 {
9581 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9582 if (pcbWritten)
9583 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9584 }
9585 else if (pVCpu->iem.s.cActiveMappings > 0)
9586 iemMemRollback(pVCpu);
9587
9588 return rcStrict;
9589}
9590
9591
9592VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9593 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9594{
9595 VBOXSTRICTRC rcStrict;
9596 if ( cbOpcodeBytes
9597 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9598 {
9599 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9600#ifdef IEM_WITH_CODE_TLB
9601 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9602 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9603 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9604 pVCpu->iem.s.offCurInstrStart = 0;
9605 pVCpu->iem.s.offInstrNextByte = 0;
9606 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9607#else
9608 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9609 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9610#endif
9611 rcStrict = VINF_SUCCESS;
9612 }
9613 else
9614 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9615 if (rcStrict == VINF_SUCCESS)
9616 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9617 else if (pVCpu->iem.s.cActiveMappings > 0)
9618 iemMemRollback(pVCpu);
9619
9620 return rcStrict;
9621}
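/*
 * Illustrative sketch (not part of the build): executing one instruction from
 * opcode bytes the caller has already fetched, e.g. in a hardware-assisted
 * exit handler.  The opcode buffer (CPUID, 0F A2) is an example assumption;
 * the fast path is only taken when OpcodeBytesPC matches the current RIP.
 */
#if 0
uint8_t const abOpcodes[] = { 0x0f, 0xa2 }; /* example bytes: cpuid */
VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
                                                     abOpcodes, sizeof(abOpcodes));
#endif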
9622
9623
9624VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9625{
9626 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9627 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9628 if (rcStrict == VINF_SUCCESS)
9629 {
9630 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9631 if (pcbWritten)
9632 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9633 }
9634 else if (pVCpu->iem.s.cActiveMappings > 0)
9635 iemMemRollback(pVCpu);
9636
9637 return rcStrict;
9638}
9639
9640
9641VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9642 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9643{
9644 VBOXSTRICTRC rcStrict;
9645 if ( cbOpcodeBytes
9646 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9647 {
9648 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9649#ifdef IEM_WITH_CODE_TLB
9650 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9651 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9652 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9653 pVCpu->iem.s.offCurInstrStart = 0;
9654 pVCpu->iem.s.offInstrNextByte = 0;
9655 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9656#else
9657 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9658 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9659#endif
9660 rcStrict = VINF_SUCCESS;
9661 }
9662 else
9663 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9664 if (rcStrict == VINF_SUCCESS)
9665 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9666 else if (pVCpu->iem.s.cActiveMappings > 0)
9667 iemMemRollback(pVCpu);
9668
9669 return rcStrict;
9670}
9671
9672
9673/**
9674 * For handling split cacheline lock operations when the host has split-lock
9675 * detection enabled.
9676 *
9677 * This will cause the interpreter to disregard the lock prefix and implicit
9678 * locking (xchg).
9679 *
9680 * @returns Strict VBox status code.
9681 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9682 */
9683VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9684{
9685 /*
9686 * Do the decoding and emulation.
9687 */
9688 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9689 if (rcStrict == VINF_SUCCESS)
9690 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9691 else if (pVCpu->iem.s.cActiveMappings > 0)
9692 iemMemRollback(pVCpu);
9693
9694 if (rcStrict != VINF_SUCCESS)
9695 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9696 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9697 return rcStrict;
9698}
9699
9700
9701/**
9702 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9703 * inject a pending TRPM trap.
9704 */
9705VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9706{
9707 Assert(TRPMHasTrap(pVCpu));
9708
9709 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9710 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9711 {
9712 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9713#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9714 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9715 if (fIntrEnabled)
9716 {
9717 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9718 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9719 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9720 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9721 else
9722 {
9723 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9724 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9725 }
9726 }
9727#else
9728 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9729#endif
9730 if (fIntrEnabled)
9731 {
9732 uint8_t u8TrapNo;
9733 TRPMEVENT enmType;
9734 uint32_t uErrCode;
9735 RTGCPTR uCr2;
9736 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9737 AssertRC(rc2);
9738 Assert(enmType == TRPM_HARDWARE_INT);
9739 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9740
9741 TRPMResetTrap(pVCpu);
9742
9743#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9744 /* Injecting an event may cause a VM-exit. */
9745 if ( rcStrict != VINF_SUCCESS
9746 && rcStrict != VINF_IEM_RAISED_XCPT)
9747 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9748#else
9749 NOREF(rcStrict);
9750#endif
9751 }
9752 }
9753
9754 return VINF_SUCCESS;
9755}
9756
9757
9758VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9759{
9760 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9761 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9762 Assert(cMaxInstructions > 0);
9763
9764 /*
9765 * See if there is an interrupt pending in TRPM, inject it if we can.
9766 */
9767 /** @todo What if we are injecting an exception and not an interrupt? Is that
9768 * possible here? For now we assert it is indeed only an interrupt. */
9769 if (!TRPMHasTrap(pVCpu))
9770 { /* likely */ }
9771 else
9772 {
9773 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9774 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9775 { /*likely */ }
9776 else
9777 return rcStrict;
9778 }
9779
9780 /*
9781 * Initial decoder init w/ prefetch, then setup setjmp.
9782 */
9783 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9784 if (rcStrict == VINF_SUCCESS)
9785 {
9786#ifdef IEM_WITH_SETJMP
9787 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9788 IEM_TRY_SETJMP(pVCpu, rcStrict)
9789#endif
9790 {
9791 /*
9792 * The run loop. The caller-specified cMaxInstructions caps the iteration count.
9793 */
9794 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9795 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9796 for (;;)
9797 {
9798 /*
9799 * Log the state.
9800 */
9801#ifdef LOG_ENABLED
9802 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9803#endif
9804
9805 /*
9806 * Do the decoding and emulation.
9807 */
9808 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9809 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9810#ifdef VBOX_STRICT
9811 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9812#endif
9813 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9814 {
9815 Assert(pVCpu->iem.s.cActiveMappings == 0);
9816 pVCpu->iem.s.cInstructions++;
9817
9818#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9819 /* Perform any VMX nested-guest instruction boundary actions. */
9820 uint64_t fCpu = pVCpu->fLocalForcedActions;
9821 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9822 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9823 { /* likely */ }
9824 else
9825 {
9826 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9827 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9828 fCpu = pVCpu->fLocalForcedActions;
9829 else
9830 {
9831 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9832 break;
9833 }
9834 }
9835#endif
9836 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9837 {
9838#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9839 uint64_t fCpu = pVCpu->fLocalForcedActions;
9840#endif
9841 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9842 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9843 | VMCPU_FF_TLB_FLUSH
9844 | VMCPU_FF_UNHALT );
9845
9846 if (RT_LIKELY( ( !fCpu
9847 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9848 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9849 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9850 {
9851 if (--cMaxInstructionsGccStupidity > 0)
9852 {
9853 /* Poll timers every now and then according to the caller's specs. */
9854 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9855 || !TMTimerPollBool(pVM, pVCpu))
9856 {
9857 Assert(pVCpu->iem.s.cActiveMappings == 0);
9858 iemReInitDecoder(pVCpu);
9859 continue;
9860 }
9861 }
9862 }
9863 }
9864 Assert(pVCpu->iem.s.cActiveMappings == 0);
9865 }
9866 else if (pVCpu->iem.s.cActiveMappings > 0)
9867 iemMemRollback(pVCpu);
9868 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9869 break;
9870 }
9871 }
9872#ifdef IEM_WITH_SETJMP
9873 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9874 {
9875 if (pVCpu->iem.s.cActiveMappings > 0)
9876 iemMemRollback(pVCpu);
9877# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9878 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9879# endif
9880 pVCpu->iem.s.cLongJumps++;
9881 }
9882 IEM_CATCH_LONGJMP_END(pVCpu);
9883#endif
9884
9885 /*
9886 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9887 */
9888 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9889 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9890 }
9891 else
9892 {
9893 if (pVCpu->iem.s.cActiveMappings > 0)
9894 iemMemRollback(pVCpu);
9895
9896#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9897 /*
9898 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9899 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9900 */
9901 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9902#endif
9903 }
9904
9905 /*
9906 * Maybe re-enter raw-mode and log.
9907 */
9908 if (rcStrict != VINF_SUCCESS)
9909 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9910 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9911 if (pcInstructions)
9912 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9913 return rcStrict;
9914}
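/*
 * Illustrative sketch (not part of the build): calling IEMExecLots.  Note that
 * cPollRate is used as a mask and must be a power of two minus one, as the
 * assertion at the top of the function requires.  The concrete numbers are
 * assumptions for the example only.
 */
#if 0
uint32_t     cInstructions = 0;
VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/,
                                    511 /*cPollRate: 2^9 - 1*/, &cInstructions);
#endif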
9915
9916
9917/**
9918 * Interface used by EMExecuteExec, does exit statistics and limits.
9919 *
9920 * @returns Strict VBox status code.
9921 * @param pVCpu The cross context virtual CPU structure.
9922 * @param fWillExit To be defined.
9923 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9924 * @param cMaxInstructions Maximum number of instructions to execute.
9925 * @param cMaxInstructionsWithoutExits
9926 * The max number of instructions without exits.
9927 * @param pStats Where to return statistics.
9928 */
9929VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9930 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9931{
9932 NOREF(fWillExit); /** @todo define flexible exit crits */
9933
9934 /*
9935 * Initialize return stats.
9936 */
9937 pStats->cInstructions = 0;
9938 pStats->cExits = 0;
9939 pStats->cMaxExitDistance = 0;
9940 pStats->cReserved = 0;
9941
9942 /*
9943 * Initial decoder init w/ prefetch, then setup setjmp.
9944 */
9945 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9946 if (rcStrict == VINF_SUCCESS)
9947 {
9948#ifdef IEM_WITH_SETJMP
9949 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9950 IEM_TRY_SETJMP(pVCpu, rcStrict)
9951#endif
9952 {
9953#ifdef IN_RING0
9954 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9955#endif
9956 uint32_t cInstructionSinceLastExit = 0;
9957
9958 /*
9959 * The run loop. The caller-specified limits (cMaxInstructions, cMaxInstructionsWithoutExits) cap it.
9960 */
9961 PVM pVM = pVCpu->CTX_SUFF(pVM);
9962 for (;;)
9963 {
9964 /*
9965 * Log the state.
9966 */
9967#ifdef LOG_ENABLED
9968 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9969#endif
9970
9971 /*
9972 * Do the decoding and emulation.
9973 */
9974 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9975
9976 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9977 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9978
9979 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9980 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9981 {
9982 pStats->cExits += 1;
9983 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9984 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9985 cInstructionSinceLastExit = 0;
9986 }
9987
9988 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9989 {
9990 Assert(pVCpu->iem.s.cActiveMappings == 0);
9991 pVCpu->iem.s.cInstructions++;
9992 pStats->cInstructions++;
9993 cInstructionSinceLastExit++;
9994
9995#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9996 /* Perform any VMX nested-guest instruction boundary actions. */
9997 uint64_t fCpu = pVCpu->fLocalForcedActions;
9998 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9999 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10000 { /* likely */ }
10001 else
10002 {
10003 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10004 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10005 fCpu = pVCpu->fLocalForcedActions;
10006 else
10007 {
10008 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10009 break;
10010 }
10011 }
10012#endif
10013 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10014 {
10015#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10016 uint64_t fCpu = pVCpu->fLocalForcedActions;
10017#endif
10018 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10019 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10020 | VMCPU_FF_TLB_FLUSH
10021 | VMCPU_FF_UNHALT );
10022 if (RT_LIKELY( ( ( !fCpu
10023 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10024 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10025 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10026 || pStats->cInstructions < cMinInstructions))
10027 {
10028 if (pStats->cInstructions < cMaxInstructions)
10029 {
10030 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10031 {
10032#ifdef IN_RING0
10033 if ( !fCheckPreemptionPending
10034 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10035#endif
10036 {
10037 Assert(pVCpu->iem.s.cActiveMappings == 0);
10038 iemReInitDecoder(pVCpu);
10039 continue;
10040 }
10041#ifdef IN_RING0
10042 rcStrict = VINF_EM_RAW_INTERRUPT;
10043 break;
10044#endif
10045 }
10046 }
10047 }
10048 Assert(!(fCpu & VMCPU_FF_IEM));
10049 }
10050 Assert(pVCpu->iem.s.cActiveMappings == 0);
10051 }
10052 else if (pVCpu->iem.s.cActiveMappings > 0)
10053 iemMemRollback(pVCpu);
10054 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10055 break;
10056 }
10057 }
10058#ifdef IEM_WITH_SETJMP
10059 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10060 {
10061 if (pVCpu->iem.s.cActiveMappings > 0)
10062 iemMemRollback(pVCpu);
10063 pVCpu->iem.s.cLongJumps++;
10064 }
10065 IEM_CATCH_LONGJMP_END(pVCpu);
10066#endif
10067
10068 /*
10069 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10070 */
10071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10072 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10073 }
10074 else
10075 {
10076 if (pVCpu->iem.s.cActiveMappings > 0)
10077 iemMemRollback(pVCpu);
10078
10079#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10080 /*
10081 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10082 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10083 */
10084 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10085#endif
10086 }
10087
10088 /*
10089 * Maybe re-enter raw-mode and log.
10090 */
10091 if (rcStrict != VINF_SUCCESS)
10092 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10093 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10094 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10095 return rcStrict;
10096}
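/*
 * Illustrative sketch (not part of the build): a caller collecting the exit
 * statistics from IEMExecForExits.  The limits chosen are assumptions for the
 * example; the function initializes the stats structure itself.
 */
#if 0
IEMEXECFOREXITSTATS Stats;
VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 16 /*cMinInstructions*/,
                                        1024 /*cMaxInstructions*/,
                                        32 /*cMaxInstructionsWithoutExits*/, &Stats);
/* Stats.cInstructions, Stats.cExits and Stats.cMaxExitDistance are now filled in. */
#endif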
10097
10098
10099/**
10100 * Injects a trap, fault, abort, software interrupt or external interrupt.
10101 *
10102 * The parameter list matches TRPMQueryTrapAll pretty closely.
10103 *
10104 * @returns Strict VBox status code.
10105 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10106 * @param u8TrapNo The trap number.
10107 * @param enmType What type is it (trap/fault/abort), software
10108 * interrupt or hardware interrupt.
10109 * @param uErrCode The error code if applicable.
10110 * @param uCr2 The CR2 value if applicable.
10111 * @param cbInstr The instruction length (only relevant for
10112 * software interrupts).
10113 */
10114VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10115 uint8_t cbInstr)
10116{
10117 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10118#ifdef DBGFTRACE_ENABLED
10119 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10120 u8TrapNo, enmType, uErrCode, uCr2);
10121#endif
10122
10123 uint32_t fFlags;
10124 switch (enmType)
10125 {
10126 case TRPM_HARDWARE_INT:
10127 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10128 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10129 uErrCode = uCr2 = 0;
10130 break;
10131
10132 case TRPM_SOFTWARE_INT:
10133 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10134 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10135 uErrCode = uCr2 = 0;
10136 break;
10137
10138 case TRPM_TRAP:
10139 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
10140 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10141 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10142 if (u8TrapNo == X86_XCPT_PF)
10143 fFlags |= IEM_XCPT_FLAGS_CR2;
10144 switch (u8TrapNo)
10145 {
10146 case X86_XCPT_DF:
10147 case X86_XCPT_TS:
10148 case X86_XCPT_NP:
10149 case X86_XCPT_SS:
10150 case X86_XCPT_PF:
10151 case X86_XCPT_AC:
10152 case X86_XCPT_GP:
10153 fFlags |= IEM_XCPT_FLAGS_ERR;
10154 break;
10155 }
10156 break;
10157
10158 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10159 }
10160
10161 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10162
10163 if (pVCpu->iem.s.cActiveMappings > 0)
10164 iemMemRollback(pVCpu);
10165
10166 return rcStrict;
10167}
10168
10169
10170/**
10171 * Injects the active TRPM event.
10172 *
10173 * @returns Strict VBox status code.
10174 * @param pVCpu The cross context virtual CPU structure.
10175 */
10176VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10177{
10178#ifndef IEM_IMPLEMENTS_TASKSWITCH
10179 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10180#else
10181 uint8_t u8TrapNo;
10182 TRPMEVENT enmType;
10183 uint32_t uErrCode;
10184 RTGCUINTPTR uCr2;
10185 uint8_t cbInstr;
10186 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10187 if (RT_FAILURE(rc))
10188 return rc;
10189
10190 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10191 * ICEBP \#DB injection as a special case. */
10192 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10193#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10194 if (rcStrict == VINF_SVM_VMEXIT)
10195 rcStrict = VINF_SUCCESS;
10196#endif
10197#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10198 if (rcStrict == VINF_VMX_VMEXIT)
10199 rcStrict = VINF_SUCCESS;
10200#endif
10201 /** @todo Are there any other codes that imply the event was successfully
10202 * delivered to the guest? See @bugref{6607}. */
10203 if ( rcStrict == VINF_SUCCESS
10204 || rcStrict == VINF_IEM_RAISED_XCPT)
10205 TRPMResetTrap(pVCpu);
10206
10207 return rcStrict;
10208#endif
10209}
10210
10211
10212VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10213{
10214 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10215 return VERR_NOT_IMPLEMENTED;
10216}
10217
10218
10219VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10220{
10221 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10222 return VERR_NOT_IMPLEMENTED;
10223}
10224
10225
10226/**
10227 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10228 *
10229 * This API ASSUMES that the caller has already verified that the guest code is
10230 * allowed to access the I/O port. (The I/O port is in the DX register in the
10231 * guest state.)
10232 *
10233 * @returns Strict VBox status code.
10234 * @param pVCpu The cross context virtual CPU structure.
10235 * @param cbValue The size of the I/O port access (1, 2, or 4).
10236 * @param enmAddrMode The addressing mode.
10237 * @param fRepPrefix Indicates whether a repeat prefix is used
10238 * (doesn't matter which for this instruction).
10239 * @param cbInstr The instruction length in bytes.
10240 * @param iEffSeg The effective segment address.
10241 * @param fIoChecked Whether the access to the I/O port has been
10242 * checked or not. It's typically checked in the
10243 * HM scenario.
10244 */
10245VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10246 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10247{
10248 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10249 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10250
10251 /*
10252 * State init.
10253 */
10254 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10255
10256 /*
10257 * Switch orgy for getting to the right handler.
10258 */
10259 VBOXSTRICTRC rcStrict;
10260 if (fRepPrefix)
10261 {
10262 switch (enmAddrMode)
10263 {
10264 case IEMMODE_16BIT:
10265 switch (cbValue)
10266 {
10267 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10268 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10269 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10270 default:
10271 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10272 }
10273 break;
10274
10275 case IEMMODE_32BIT:
10276 switch (cbValue)
10277 {
10278 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10279 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10280 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10281 default:
10282 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10283 }
10284 break;
10285
10286 case IEMMODE_64BIT:
10287 switch (cbValue)
10288 {
10289 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10290 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10291 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10292 default:
10293 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10294 }
10295 break;
10296
10297 default:
10298 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10299 }
10300 }
10301 else
10302 {
10303 switch (enmAddrMode)
10304 {
10305 case IEMMODE_16BIT:
10306 switch (cbValue)
10307 {
10308 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10309 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10310 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10311 default:
10312 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10313 }
10314 break;
10315
10316 case IEMMODE_32BIT:
10317 switch (cbValue)
10318 {
10319 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10320 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10321 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10322 default:
10323 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10324 }
10325 break;
10326
10327 case IEMMODE_64BIT:
10328 switch (cbValue)
10329 {
10330 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10331 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10332 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10333 default:
10334 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10335 }
10336 break;
10337
10338 default:
10339 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10340 }
10341 }
10342
10343 if (pVCpu->iem.s.cActiveMappings)
10344 iemMemRollback(pVCpu);
10345
10346 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10347}
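
/*
 * Usage sketch (illustrative only): how a hypothetical exit handler might
 * forward a decoded REP OUTSB to IEMExecStringIoWrite. The handler name and
 * the cbExitInstr/iExitSeg parameters are assumptions, not VBox APIs.
 */
#if 0 /* illustrative sketch */
static VBOXSTRICTRC exampleForwardOutsExit(PVMCPUCC pVCpu, uint8_t cbExitInstr, uint8_t iExitSeg)
{
    /* REP OUTSB with 32-bit addressing; the I/O-port permission check is
       assumed to have been done already by the caller (fIoChecked=true). */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbExitInstr, iExitSeg, true /*fIoChecked*/);
}
#endif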
10348
10349
10350/**
10351 * Interface for HM and EM for executing string I/O IN (read) instructions.
10352 *
10353 * This API ASSUMES that the caller has already verified that the guest code is
10354 * allowed to access the I/O port. (The I/O port is in the DX register in the
10355 * guest state.)
10356 *
10357 * @returns Strict VBox status code.
10358 * @param pVCpu The cross context virtual CPU structure.
10359 * @param cbValue The size of the I/O port access (1, 2, or 4).
10360 * @param enmAddrMode The addressing mode.
10361 * @param fRepPrefix Indicates whether a repeat prefix is used
10362 * (doesn't matter which for this instruction).
10363 * @param cbInstr The instruction length in bytes.
10364 * @param fIoChecked Whether the access to the I/O port has been
10365 * checked or not. It's typically checked in the
10366 * HM scenario.
10367 */
10368VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10369 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10370{
10371 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10372
10373 /*
10374 * State init.
10375 */
10376 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10377
10378 /*
10379 * Switch orgy for getting to the right handler.
10380 */
10381 VBOXSTRICTRC rcStrict;
10382 if (fRepPrefix)
10383 {
10384 switch (enmAddrMode)
10385 {
10386 case IEMMODE_16BIT:
10387 switch (cbValue)
10388 {
10389 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10390 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10391 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10392 default:
10393 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10394 }
10395 break;
10396
10397 case IEMMODE_32BIT:
10398 switch (cbValue)
10399 {
10400 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10401 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10402 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10403 default:
10404 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10405 }
10406 break;
10407
10408 case IEMMODE_64BIT:
10409 switch (cbValue)
10410 {
10411 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10412 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10413 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10414 default:
10415 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10416 }
10417 break;
10418
10419 default:
10420 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10421 }
10422 }
10423 else
10424 {
10425 switch (enmAddrMode)
10426 {
10427 case IEMMODE_16BIT:
10428 switch (cbValue)
10429 {
10430 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10431 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10432 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10433 default:
10434 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10435 }
10436 break;
10437
10438 case IEMMODE_32BIT:
10439 switch (cbValue)
10440 {
10441 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10442 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10443 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10444 default:
10445 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10446 }
10447 break;
10448
10449 case IEMMODE_64BIT:
10450 switch (cbValue)
10451 {
10452 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10453 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10454 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10455 default:
10456 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10457 }
10458 break;
10459
10460 default:
10461 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10462 }
10463 }
10464
10465 if ( pVCpu->iem.s.cActiveMappings == 0
10466 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10467 { /* likely */ }
10468 else
10469 {
10470 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10471 iemMemRollback(pVCpu);
10472 }
10473 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10474}
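
/*
 * Usage sketch (illustrative only): forwarding a REP INSW exit. Unlike the
 * write case above, a bounce-buffer commit may still be pending on return,
 * which ring-3 later picks up via VMCPU_FF_IEM (see IEMR3ProcessForceFlag
 * below). The handler name and cbExitInstr parameter are assumptions.
 */
#if 0 /* illustrative sketch */
static VBOXSTRICTRC exampleForwardInsExit(PVMCPUCC pVCpu, uint8_t cbExitInstr)
{
    return IEMExecStringIoRead(pVCpu, 2 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                               cbExitInstr, false /*fIoChecked*/);
}
#endif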
10475
10476
10477/**
10478 * Interface for rawmode to execute an OUT instruction.
10479 *
10480 * @returns Strict VBox status code.
10481 * @param pVCpu The cross context virtual CPU structure.
10482 * @param cbInstr The instruction length in bytes.
10483 * @param u16Port The port to write to.
10484 * @param fImm Whether the port is specified using an immediate operand or
10485 * using the implicit DX register.
10486 * @param cbReg The register size.
10487 *
10488 * @remarks In ring-0 not all of the state needs to be synced in.
10489 */
10490VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10491{
10492 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10493 Assert(cbReg <= 4 && cbReg != 3);
10494
10495 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10496 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10497 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10498 Assert(!pVCpu->iem.s.cActiveMappings);
10499 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10500}
10501
10502
10503/**
10504 * Interface for rawmode to execute an IN instruction.
10505 *
10506 * @returns Strict VBox status code.
10507 * @param pVCpu The cross context virtual CPU structure.
10508 * @param cbInstr The instruction length in bytes.
10509 * @param u16Port The port to read.
10510 * @param fImm Whether the port is specified using an immediate operand or
10511 * using the implicit DX register.
10512 * @param cbReg The register size.
10513 */
10514VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10515{
10516 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10517 Assert(cbReg <= 4 && cbReg != 3);
10518
10519 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10520 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10521 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10522 Assert(!pVCpu->iem.s.cActiveMappings);
10523 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10524}
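
/*
 * Usage sketch (illustrative only): forwarding plain OUT/IN exits. The first
 * call shows OUT DX, EAX (port taken from DX, so fImm=false), the second
 * IN AL, 0x60 (immediate port form). u16PortFromDx is an assumed local
 * holding the port number; cbReg is the operand size in bytes (1, 2 or 4).
 */
#if 0 /* illustrative sketch */
VBOXSTRICTRC rcOut = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16PortFromDx, false /*fImm*/, 4 /*cbReg*/);
VBOXSTRICTRC rcIn  = IEMExecDecodedIn( pVCpu, 2 /*cbInstr*/, 0x60 /*u16Port*/, true  /*fImm*/, 1 /*cbReg*/);
#endif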
10525
10526
10527/**
10528 * Interface for HM and EM to write to a CRx register.
10529 *
10530 * @returns Strict VBox status code.
10531 * @param pVCpu The cross context virtual CPU structure.
10532 * @param cbInstr The instruction length in bytes.
10533 * @param iCrReg The control register number (destination).
10534 * @param iGReg The general purpose register number (source).
10535 *
10536 * @remarks In ring-0 not all of the state needs to be synced in.
10537 */
10538VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10539{
10540 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10541 Assert(iCrReg < 16);
10542 Assert(iGReg < 16);
10543
10544 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10545 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10546 Assert(!pVCpu->iem.s.cActiveMappings);
10547 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10548}
10549
10550
10551/**
10552 * Interface for HM and EM to read from a CRx register.
10553 *
10554 * @returns Strict VBox status code.
10555 * @param pVCpu The cross context virtual CPU structure.
10556 * @param cbInstr The instruction length in bytes.
10557 * @param iGReg The general purpose register number (destination).
10558 * @param iCrReg The control register number (source).
10559 *
10560 * @remarks In ring-0 not all of the state needs to be synced in.
10561 */
10562VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10563{
10564 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10565 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10566 | CPUMCTX_EXTRN_APIC_TPR);
10567 Assert(iCrReg < 16);
10568 Assert(iGReg < 16);
10569
10570 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10571 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10572 Assert(!pVCpu->iem.s.cActiveMappings);
10573 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10574}
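
/*
 * Usage sketch (illustrative only): forwarding MOV CRx exits. The register
 * numbers follow the x86 encoding (iCrReg 3 = CR3, iGReg 0 = RAX, 1 = RCX);
 * the 3-byte length matches the prefix-less 0F 20/22 /r encodings.
 */
#if 0 /* illustrative sketch */
/* MOV CR3, RAX: */
VBOXSTRICTRC rcCrWrite = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, 0 /*iGReg*/);
/* MOV RCX, CR0: */
VBOXSTRICTRC rcCrRead  = IEMExecDecodedMovCRxRead( pVCpu, 3 /*cbInstr*/, 1 /*iGReg*/,  0 /*iCrReg*/);
#endif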
10575
10576
10577/**
10578 * Interface for HM and EM to write to a DRx register.
10579 *
10580 * @returns Strict VBox status code.
10581 * @param pVCpu The cross context virtual CPU structure.
10582 * @param cbInstr The instruction length in bytes.
10583 * @param iDrReg The debug register number (destination).
10584 * @param iGReg The general purpose register number (source).
10585 *
10586 * @remarks In ring-0 not all of the state needs to be synced in.
10587 */
10588VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10589{
10590 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10591 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10592 Assert(iDrReg < 8);
10593 Assert(iGReg < 16);
10594
10595 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10596 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10597 Assert(!pVCpu->iem.s.cActiveMappings);
10598 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10599}
10600
10601
10602/**
10603 * Interface for HM and EM to read from a DRx register.
10604 *
10605 * @returns Strict VBox status code.
10606 * @param pVCpu The cross context virtual CPU structure.
10607 * @param cbInstr The instruction length in bytes.
10608 * @param iGReg The general purpose register number (destination).
10609 * @param iDrReg The debug register number (source).
10610 *
10611 * @remarks In ring-0 not all of the state needs to be synced in.
10612 */
10613VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10614{
10615 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10616 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10617 Assert(iDrReg < 8);
10618 Assert(iGReg < 16);
10619
10620 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10621 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10622 Assert(!pVCpu->iem.s.cActiveMappings);
10623 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10624}
10625
10626
10627/**
10628 * Interface for HM and EM to clear the CR0[TS] bit.
10629 *
10630 * @returns Strict VBox status code.
10631 * @param pVCpu The cross context virtual CPU structure.
10632 * @param cbInstr The instruction length in bytes.
10633 *
10634 * @remarks In ring-0 not all of the state needs to be synced in.
10635 */
10636VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10637{
10638 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10639
10640 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10641 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10642 Assert(!pVCpu->iem.s.cActiveMappings);
10643 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10644}
10645
10646
10647/**
10648 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10649 *
10650 * @returns Strict VBox status code.
10651 * @param pVCpu The cross context virtual CPU structure.
10652 * @param cbInstr The instruction length in bytes.
10653 * @param uValue The value to load into CR0.
10654 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10655 * memory operand. Otherwise pass NIL_RTGCPTR.
10656 *
10657 * @remarks In ring-0 not all of the state needs to be synced in.
10658 */
10659VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10660{
10661 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10662
10663 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10664 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10665 Assert(!pVCpu->iem.s.cActiveMappings);
10666 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10667}
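
/*
 * Usage sketch (illustrative only): forwarding an LMSW exit. For the register
 * form (e.g. LMSW AX, 3 bytes) there is no memory operand, so NIL_RTGCPTR is
 * passed for GCPtrEffDst; uNewMsw is an assumed local with the 16-bit value.
 */
#if 0 /* illustrative sketch */
VBOXSTRICTRC rcLmsw = IEMExecDecodedLmsw(pVCpu, 3 /*cbInstr*/, uNewMsw, NIL_RTGCPTR /*GCPtrEffDst*/);
#endif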
10668
10669
10670/**
10671 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10672 *
10673 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10674 *
10675 * @returns Strict VBox status code.
10676 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10677 * @param cbInstr The instruction length in bytes.
10678 * @remarks In ring-0 not all of the state needs to be synced in.
10679 * @thread EMT(pVCpu)
10680 */
10681VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10682{
10683 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10684
10685 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10686 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10687 Assert(!pVCpu->iem.s.cActiveMappings);
10688 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10689}
10690
10691
10692/**
10693 * Interface for HM and EM to emulate the WBINVD instruction.
10694 *
10695 * @returns Strict VBox status code.
10696 * @param pVCpu The cross context virtual CPU structure.
10697 * @param cbInstr The instruction length in bytes.
10698 *
10699 * @remarks In ring-0 not all of the state needs to be synced in.
10700 */
10701VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10702{
10703 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10704
10705 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10706 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10707 Assert(!pVCpu->iem.s.cActiveMappings);
10708 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10709}
10710
10711
10712/**
10713 * Interface for HM and EM to emulate the INVD instruction.
10714 *
10715 * @returns Strict VBox status code.
10716 * @param pVCpu The cross context virtual CPU structure.
10717 * @param cbInstr The instruction length in bytes.
10718 *
10719 * @remarks In ring-0 not all of the state needs to be synced in.
10720 */
10721VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10722{
10723 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10724
10725 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10726 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10727 Assert(!pVCpu->iem.s.cActiveMappings);
10728 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10729}
10730
10731
10732/**
10733 * Interface for HM and EM to emulate the INVLPG instruction.
10734 *
10735 * @returns Strict VBox status code.
10736 * @retval VINF_PGM_SYNC_CR3
10737 *
10738 * @param pVCpu The cross context virtual CPU structure.
10739 * @param cbInstr The instruction length in bytes.
10740 * @param GCPtrPage The effective address of the page to invalidate.
10741 *
10742 * @remarks In ring-0 not all of the state needs to be synced in.
10743 */
10744VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10745{
10746 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10747
10748 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10749 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10750 Assert(!pVCpu->iem.s.cActiveMappings);
10751 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10752}
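
/*
 * Usage sketch (illustrative only): forwarding an INVLPG exit and reacting to
 * the VINF_PGM_SYNC_CR3 return value documented above. GCPtrExitPage is an
 * assumed local holding the decoded effective address.
 */
#if 0 /* illustrative sketch */
VBOXSTRICTRC rcInvlpg = IEMExecDecodedInvlpg(pVCpu, 3 /*cbInstr*/, GCPtrExitPage);
if (rcInvlpg == VINF_PGM_SYNC_CR3)
{
    /* The caller would resync the shadow page tables before resuming the guest. */
}
#endif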
10753
10754
10755/**
10756 * Interface for HM and EM to emulate the INVPCID instruction.
10757 *
10758 * @returns Strict VBox status code.
10759 * @retval VINF_PGM_SYNC_CR3
10760 *
10761 * @param pVCpu The cross context virtual CPU structure.
10762 * @param cbInstr The instruction length in bytes.
10763 * @param iEffSeg The effective segment register.
10764 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10765 * @param uType The invalidation type.
10766 *
10767 * @remarks In ring-0 not all of the state needs to be synced in.
10768 */
10769VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10770 uint64_t uType)
10771{
10772 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10773
10774 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10775 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10776 Assert(!pVCpu->iem.s.cActiveMappings);
10777 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10778}
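
/*
 * Usage sketch (illustrative only): forwarding an INVPCID exit. The descriptor
 * is read from DS:GCPtrExitDesc; GCPtrExitDesc and uExitType are assumed
 * locals taken from the decoded instruction operands.
 */
#if 0 /* illustrative sketch */
VBOXSTRICTRC rcInvpcid = IEMExecDecodedInvpcid(pVCpu, 5 /*cbInstr*/, X86_SREG_DS, GCPtrExitDesc, uExitType);
#endif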
10779
10780
10781/**
10782 * Interface for HM and EM to emulate the CPUID instruction.
10783 *
10784 * @returns Strict VBox status code.
10785 *
10786 * @param pVCpu The cross context virtual CPU structure.
10787 * @param cbInstr The instruction length in bytes.
10788 *
10789 * @remarks Not all of the state needs to be synced in; the usual state plus RAX and RCX.
10790 */
10791VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10792{
10793 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10794 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10795
10796 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10797 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10798 Assert(!pVCpu->iem.s.cActiveMappings);
10799 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10800}
10801
10802
10803/**
10804 * Interface for HM and EM to emulate the RDPMC instruction.
10805 *
10806 * @returns Strict VBox status code.
10807 *
10808 * @param pVCpu The cross context virtual CPU structure.
10809 * @param cbInstr The instruction length in bytes.
10810 *
10811 * @remarks Not all of the state needs to be synced in.
10812 */
10813VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10814{
10815 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10816 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10817
10818 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10819 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10820 Assert(!pVCpu->iem.s.cActiveMappings);
10821 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10822}
10823
10824
10825/**
10826 * Interface for HM and EM to emulate the RDTSC instruction.
10827 *
10828 * @returns Strict VBox status code.
10829 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10830 *
10831 * @param pVCpu The cross context virtual CPU structure.
10832 * @param cbInstr The instruction length in bytes.
10833 *
10834 * @remarks Not all of the state needs to be synced in.
10835 */
10836VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10837{
10838 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10839 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10840
10841 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10842 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10843 Assert(!pVCpu->iem.s.cActiveMappings);
10844 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10845}
10846
10847
10848/**
10849 * Interface for HM and EM to emulate the RDTSCP instruction.
10850 *
10851 * @returns Strict VBox status code.
10852 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10853 *
10854 * @param pVCpu The cross context virtual CPU structure.
10855 * @param cbInstr The instruction length in bytes.
10856 *
10857 * @remarks Not all of the state needs to be synced in. It is recommended
10858 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10859 */
10860VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10861{
10862 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10863 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10864
10865 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10866 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10867 Assert(!pVCpu->iem.s.cActiveMappings);
10868 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10869}
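
/*
 * Usage sketch (illustrative only): forwarding an RDTSCP exit. The state
 * import helper exampleImportGuestState() is purely hypothetical; the point
 * is that importing CPUMCTX_EXTRN_TSC_AUX up front avoids the extra fetch
 * mentioned in the remark above.
 */
#if 0 /* illustrative sketch */
exampleImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
VBOXSTRICTRC rcRdtscp = IEMExecDecodedRdtscp(pVCpu, 3 /*cbInstr*/);
#endif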
10870
10871
10872/**
10873 * Interface for HM and EM to emulate the RDMSR instruction.
10874 *
10875 * @returns Strict VBox status code.
10876 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10877 *
10878 * @param pVCpu The cross context virtual CPU structure.
10879 * @param cbInstr The instruction length in bytes.
10880 *
10881 * @remarks Not all of the state needs to be synced in. Requires RCX and
10882 * (currently) all MSRs.
10883 */
10884VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10885{
10886 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10887 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10888
10889 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10890 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10891 Assert(!pVCpu->iem.s.cActiveMappings);
10892 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10893}
10894
10895
10896/**
10897 * Interface for HM and EM to emulate the WRMSR instruction.
10898 *
10899 * @returns Strict VBox status code.
10900 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10901 *
10902 * @param pVCpu The cross context virtual CPU structure.
10903 * @param cbInstr The instruction length in bytes.
10904 *
10905 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10906 * and (currently) all MSRs.
10907 */
10908VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10909{
10910 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10911 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10912 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10913
10914 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10915 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10916 Assert(!pVCpu->iem.s.cActiveMappings);
10917 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10918}
10919
10920
10921/**
10922 * Interface for HM and EM to emulate the MONITOR instruction.
10923 *
10924 * @returns Strict VBox status code.
10925 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10926 *
10927 * @param pVCpu The cross context virtual CPU structure.
10928 * @param cbInstr The instruction length in bytes.
10929 *
10930 * @remarks Not all of the state needs to be synced in.
10931 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10932 * are used.
10933 */
10934VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10935{
10936 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10937 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10938
10939 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10940 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10941 Assert(!pVCpu->iem.s.cActiveMappings);
10942 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10943}
10944
10945
10946/**
10947 * Interface for HM and EM to emulate the MWAIT instruction.
10948 *
10949 * @returns Strict VBox status code.
10950 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10951 *
10952 * @param pVCpu The cross context virtual CPU structure.
10953 * @param cbInstr The instruction length in bytes.
10954 *
10955 * @remarks Not all of the state needs to be synced in.
10956 */
10957VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10958{
10959 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10960 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10961
10962 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10963 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10964 Assert(!pVCpu->iem.s.cActiveMappings);
10965 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10966}
10967
10968
10969/**
10970 * Interface for HM and EM to emulate the HLT instruction.
10971 *
10972 * @returns Strict VBox status code.
10973 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10974 *
10975 * @param pVCpu The cross context virtual CPU structure.
10976 * @param cbInstr The instruction length in bytes.
10977 *
10978 * @remarks Not all of the state needs to be synced in.
10979 */
10980VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10981{
10982 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10983
10984 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10985 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10986 Assert(!pVCpu->iem.s.cActiveMappings);
10987 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10988}
10989
10990
10991/**
10992 * Checks if IEM is in the process of delivering an event (interrupt or
10993 * exception).
10994 *
10995 * @returns true if we're in the process of raising an interrupt or exception,
10996 * false otherwise.
10997 * @param pVCpu The cross context virtual CPU structure.
10998 * @param puVector Where to store the vector associated with the
10999 * currently delivered event, optional.
11000 * @param pfFlags Where to store the event delivery flags (see
11001 * IEM_XCPT_FLAGS_XXX), optional.
11002 * @param puErr Where to store the error code associated with the
11003 * event, optional.
11004 * @param puCr2 Where to store the CR2 associated with the event,
11005 * optional.
11006 * @remarks The caller should check the flags to determine if the error code and
11007 * CR2 are valid for the event.
11008 */
11009VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11010{
11011 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11012 if (fRaisingXcpt)
11013 {
11014 if (puVector)
11015 *puVector = pVCpu->iem.s.uCurXcpt;
11016 if (pfFlags)
11017 *pfFlags = pVCpu->iem.s.fCurXcpt;
11018 if (puErr)
11019 *puErr = pVCpu->iem.s.uCurXcptErr;
11020 if (puCr2)
11021 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11022 }
11023 return fRaisingXcpt;
11024}
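
/*
 * Usage sketch (illustrative only): querying the event IEM is currently
 * delivering, e.g. when building vectoring information for a nested exit.
 * All output parameters are optional and may be passed as NULL.
 */
#if 0 /* illustrative sketch */
uint8_t  uVector    = 0;
uint32_t fXcptFlags = 0;
uint32_t uErrCode   = 0;
uint64_t uCr2       = 0;
if (IEMGetCurrentXcpt(pVCpu, &uVector, &fXcptFlags, &uErrCode, &uCr2))
{
    /* Consult fXcptFlags (IEM_XCPT_FLAGS_XXX) before trusting uErrCode and uCr2. */
}
#endif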
11025
11026#ifdef IN_RING3
11027
11028/**
11029 * Handles the unlikely and probably fatal merge cases.
11030 *
11031 * @returns Merged status code.
11032 * @param rcStrict Current EM status code.
11033 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11034 * with @a rcStrict.
11035 * @param iMemMap The memory mapping index. For error reporting only.
11036 * @param pVCpu The cross context virtual CPU structure of the calling
11037 * thread, for error reporting only.
11038 */
11039DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11040 unsigned iMemMap, PVMCPUCC pVCpu)
11041{
11042 if (RT_FAILURE_NP(rcStrict))
11043 return rcStrict;
11044
11045 if (RT_FAILURE_NP(rcStrictCommit))
11046 return rcStrictCommit;
11047
11048 if (rcStrict == rcStrictCommit)
11049 return rcStrictCommit;
11050
11051 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11052 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11053 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11054 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11055 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11056 return VERR_IOM_FF_STATUS_IPE;
11057}
11058
11059
11060/**
11061 * Helper for IOMR3ProcessForceFlag.
11062 *
11063 * @returns Merged status code.
11064 * @param rcStrict Current EM status code.
11065 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11066 * with @a rcStrict.
11067 * @param iMemMap The memory mapping index. For error reporting only.
11068 * @param pVCpu The cross context virtual CPU structure of the calling
11069 * thread, for error reporting only.
11070 */
11071DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11072{
11073 /* Simple. */
11074 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11075 return rcStrictCommit;
11076
11077 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11078 return rcStrict;
11079
11080 /* EM scheduling status codes. */
11081 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11082 && rcStrict <= VINF_EM_LAST))
11083 {
11084 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11085 && rcStrictCommit <= VINF_EM_LAST))
11086 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11087 }
11088
11089 /* Unlikely */
11090 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11091}
11092
11093
11094/**
11095 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11096 *
11097 * @returns Merge between @a rcStrict and what the commit operation returned.
11098 * @param pVM The cross context VM structure.
11099 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11100 * @param rcStrict The status code returned by ring-0 or raw-mode.
11101 */
11102VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11103{
11104 /*
11105 * Reset the pending commit.
11106 */
11107 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11108 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11109 ("%#x %#x %#x\n",
11110 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11111 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11112
11113 /*
11114 * Commit the pending bounce buffers (usually just one).
11115 */
11116 unsigned cBufs = 0;
11117 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11118 while (iMemMap-- > 0)
11119 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11120 {
11121 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11122 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11123 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11124
11125 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11126 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11127 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11128
11129 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11130 {
11131 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11132 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11133 pbBuf,
11134 cbFirst,
11135 PGMACCESSORIGIN_IEM);
11136 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11137 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11138 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11139 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11140 }
11141
11142 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11143 {
11144 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11145 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11146 pbBuf + cbFirst,
11147 cbSecond,
11148 PGMACCESSORIGIN_IEM);
11149 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11150 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11151 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11152 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11153 }
11154 cBufs++;
11155 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11156 }
11157
11158 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11159 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11160 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11161 pVCpu->iem.s.cActiveMappings = 0;
11162 return rcStrict;
11163}
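
/*
 * Usage sketch (illustrative only): how a ring-3 execution loop might react to
 * VMCPU_FF_IEM after coming back from ring-0, with rcStrict being the status
 * returned by ring-0. The surrounding loop is an assumption.
 */
#if 0 /* illustrative sketch */
if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
    rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif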
11164
11165#endif /* IN_RING3 */
11166