VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 103649

Last change on this file since 103649 was 103592, checked in by vboxsync, 12 months ago

VMM/IEM: Native translation of IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() body (untested due to no instruction still being re-compilable), bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 449.3 KB
1/* $Id: IEMAll.cpp 103592 2024-02-27 17:19:48Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes (an illustrative sketch follows this comment block):
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The "IEM_MEM" log group covers most of the memory-related detail logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
112
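/*
 * Illustrative sketch only (not part of the original file): how the level
 * assignments documented above map onto the generic logging macros from
 * <VBox/log.h> once LOG_GROUP is set to LOG_GROUP_IEM.  The messages and
 * variable names are made up for the example.
 */
#if 0
 Log(("iemExample: raising #GP(0) at %04x:%RX64\n", uSelCS, uRip));   /* level 1: exceptions/major events   */
 LogFlow(("iemExample: entering instruction emulation\n"));           /* flow: enter/exit state info        */
 Log4(("iemExample: decoded 'xor eax, eax' at %RX64\n", uRip));       /* level 4: decoding mnemonics w/ EIP */
 Log10(("iemExample: code TLB miss for %RGv\n", GCPtrExample));       /* level 10: TLBs                     */
#endif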
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
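/*
 * Illustrative sketch only (not part of the original file): a guest DR7 value
 * that would make PROCESS_ONE_BP above report a pending data breakpoint.  The
 * R/W field for breakpoint 0 lives at bits 16-17 of DR7; the constants are the
 * ones used by the macro above.
 */
#if 0
 uint32_t const fDr7Example = X86_DR7_L_G(0)           /* breakpoint 0 enabled (local or global)        */
                            | (X86_DR7_RW_WO << 16);   /* R/W0 = write-only -> IEM_F_PENDING_BRK_DATA   */
#endif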
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
455
456
457
458/**
459 * Prefetches opcodes the first time when starting execution.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
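/*
 * Worked example for the clamping above (illustrative only): with a 64-bit
 * GCPtrPC of 0x0000000000401ffc and GUEST_PAGE_SIZE of 0x1000, the offset into
 * the page is 0xffc, so cbToTryRead starts out as 0x1000 - 0xffc = 4 bytes; it
 * is then further limited by cbLeftOnPage and sizeof(abOpcode) before the read.
 */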
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
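/*
 * Illustrative sketch only (not part of the original file): the revision trick
 * used above.  Entries store (page tag | uTlbRevision), so bumping the revision
 * by IEMTLB_REVISION_INCR invalidates everything without touching the array; a
 * lookup therefore compares against a tag that already includes the revision:
 */
#if 0
 uint64_t const     uTag  = IEMTLB_CALC_TAG(&pVCpu->iem.s.DataTlb, GCPtrExample);
 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
 bool const         fHit  = pTlbe->uTag == uTag;  /* stale revisions never match */
#endif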
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
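/*
 * Usage sketch only (hypothetical caller): after changing the guest mapping of
 * a single page on this EMT, the corresponding IEM TLB entries are dropped with:
 */
#if 0
 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
#endif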
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs in slow fashion following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller != pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.cbInstrBufTotal = 0;
819 RT_NOREF(cbInstr);
820#else
821 RT_NOREF(pVCpu, cbInstr);
822#endif
823}
824
825
826
827#ifdef IEM_WITH_CODE_TLB
828
829/**
830 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
831 * failure and jumps.
832 *
833 * We end up here for a number of reasons:
834 * - pbInstrBuf isn't yet initialized.
835 * - Advancing beyond the buffer boundary (e.g. cross page).
836 * - Advancing beyond the CS segment limit.
837 * - Fetching from non-mappable page (e.g. MMIO).
838 *
839 * @param pVCpu The cross context virtual CPU structure of the
840 * calling thread.
841 * @param pvDst Where to return the bytes.
842 * @param cbDst Number of bytes to read. A value of zero is
843 * allowed for initializing pbInstrBuf (the
844 * recompiler does this). In this case it is best
845 * to set pbInstrBuf to NULL prior to the call.
846 */
847void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
848{
849# ifdef IN_RING3
850 for (;;)
851 {
852 Assert(cbDst <= 8);
853 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
854
855 /*
856 * We might have a partial buffer match, deal with that first to make the
857 * rest simpler. This is the first part of the cross page/buffer case.
858 */
859 if (pVCpu->iem.s.pbInstrBuf != NULL)
860 {
861 if (offBuf < pVCpu->iem.s.cbInstrBuf)
862 {
863 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
864 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
865 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
866
867 cbDst -= cbCopy;
868 pvDst = (uint8_t *)pvDst + cbCopy;
869 offBuf += cbCopy;
870 pVCpu->iem.s.offInstrNextByte += offBuf;
871 }
872 }
873
874 /*
875 * Check segment limit, figuring how much we're allowed to access at this point.
876 *
877 * We will fault immediately if RIP is past the segment limit / in non-canonical
878 * territory. If we do continue, there are one or more bytes to read before we
879 * end up in trouble and we need to do that first before faulting.
880 */
881 RTGCPTR GCPtrFirst;
882 uint32_t cbMaxRead;
883 if (IEM_IS_64BIT_CODE(pVCpu))
884 {
885 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
886 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
887 { /* likely */ }
888 else
889 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
890 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
891 }
892 else
893 {
894 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
895 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
896 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
897 { /* likely */ }
898 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
899 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
900 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
901 if (cbMaxRead != 0)
902 { /* likely */ }
903 else
904 {
905 /* Overflowed because address is 0 and limit is max. */
906 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
907 cbMaxRead = X86_PAGE_SIZE;
908 }
909 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
910 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
911 if (cbMaxRead2 < cbMaxRead)
912 cbMaxRead = cbMaxRead2;
913 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
914 }
915
916 /*
917 * Get the TLB entry for this piece of code.
918 */
919 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
920 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
921 if (pTlbe->uTag == uTag)
922 {
923 /* likely when executing lots of code, otherwise unlikely */
924# ifdef VBOX_WITH_STATISTICS
925 pVCpu->iem.s.CodeTlb.cTlbHits++;
926# endif
927 }
928 else
929 {
930 pVCpu->iem.s.CodeTlb.cTlbMisses++;
931 PGMPTWALK Walk;
932 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
933 if (RT_FAILURE(rc))
934 {
935#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
936 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
937 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
938#endif
939 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
940 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
941 }
942
943 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
944 Assert(Walk.fSucceeded);
945 pTlbe->uTag = uTag;
946 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
947 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
948 pTlbe->GCPhys = Walk.GCPhys;
949 pTlbe->pbMappingR3 = NULL;
950 }
951
952 /*
953 * Check TLB page table level access flags.
954 */
955 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
956 {
957 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
958 {
959 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
960 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
961 }
962 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
963 {
964 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
965 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
966 }
967 }
968
969 /*
970 * Set the accessed flags.
971 * ASSUMES this is set when the address is translated rather than on commit...
972 */
973 /** @todo testcase: check when the A bit is actually set by the CPU for code. */
974 if (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED)
975 {
976 int rc2 = PGMGstModifyPage(pVCpu, GCPtrFirst, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
977 AssertRC(rc2);
978 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
979 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
980 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_F_PT_NO_ACCESSED;
981 }
982
983 /*
984 * Look up the physical page info if necessary.
985 */
986 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
987 { /* not necessary */ }
988 else
989 {
990 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
991 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
992 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
993 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
994 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
995 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
996 { /* likely */ }
997 else
998 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
999 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1000 | IEMTLBE_F_NO_MAPPINGR3
1001 | IEMTLBE_F_PG_NO_READ
1002 | IEMTLBE_F_PG_NO_WRITE
1003 | IEMTLBE_F_PG_UNASSIGNED
1004 | IEMTLBE_F_PG_CODE_PAGE);
1005 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1006 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1007 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1008 }
1009
1010# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1011 /*
1012 * Try do a direct read using the pbMappingR3 pointer.
1013 */
1014 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1015 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1016 {
1017 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1018 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1019 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1020 {
1021 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1022 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1023 }
1024 else
1025 {
1026 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1027 if (cbInstr + (uint32_t)cbDst <= 15)
1028 {
1029 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1030 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1031 }
1032 else
1033 {
1034 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1035 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1036 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1037 }
1038 }
1039 if (cbDst <= cbMaxRead)
1040 {
1041 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1042 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1043
1044 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1045 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1046 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1047 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1048 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1049 return;
1050 }
1051 pVCpu->iem.s.pbInstrBuf = NULL;
1052
1053 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1054 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1055 }
1056# else
1057# error "refactor as needed"
1058 /*
1059 * If there is no special read handling, we can read a bit more and
1060 * put it in the prefetch buffer.
1061 */
1062 if ( cbDst < cbMaxRead
1063 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1064 {
1065 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1066 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1067 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1068 { /* likely */ }
1069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1070 {
1071 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1072 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1073 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1074 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1075 }
1076 else
1077 {
1078 Log((RT_SUCCESS(rcStrict)
1079 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1080 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1081 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1082 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1083 }
1084 }
1085# endif
1086 /*
1087 * Special read handling, so only read exactly what's needed.
1088 * This is a highly unlikely scenario.
1089 */
1090 else
1091 {
1092 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1093
1094 /* Check instruction length. */
1095 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1096 if (RT_LIKELY(cbInstr + cbDst <= 15))
1097 { /* likely */ }
1098 else
1099 {
1100 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1101 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1102 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1103 }
1104
1105 /* Do the reading. */
1106 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1107 if (cbToRead > 0)
1108 {
1109 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1110 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1111 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1112 { /* likely */ }
1113 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1114 {
1115 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1116 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1117 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1118 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1119 }
1120 else
1121 {
1122 Log((RT_SUCCESS(rcStrict)
1123 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1124 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1125 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1126 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1127 }
1128 }
1129
1130 /* Update the state and probably return. */
1131 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1132 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1133 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1134
1135 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1136 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1137 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1138 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1139 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1140 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1141 pVCpu->iem.s.pbInstrBuf = NULL;
1142 if (cbToRead == cbDst)
1143 return;
1144 }
1145
1146 /*
1147 * More to read, loop.
1148 */
1149 cbDst -= cbMaxRead;
1150 pvDst = (uint8_t *)pvDst + cbMaxRead;
1151 }
1152# else /* !IN_RING3 */
1153 RT_NOREF(pvDst, cbDst);
1154 if (pvDst || cbDst)
1155 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1156# endif /* !IN_RING3 */
1157}
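/*
 * Usage sketch only (illustrative): (re)initializing pbInstrBuf as described in
 * the function documentation above - a zero-byte fetch with pbInstrBuf cleared
 * first, which is what the recompiler does.
 */
#if 0
 pVCpu->iem.s.pbInstrBuf = NULL;
 iemOpcodeFetchBytesJmp(pVCpu, 0 /*cbDst*/, NULL /*pvDst*/);
#endif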
1158
1159#else /* !IEM_WITH_CODE_TLB */
1160
1161/**
1162 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1163 * exception if it fails.
1164 *
1165 * @returns Strict VBox status code.
1166 * @param pVCpu The cross context virtual CPU structure of the
1167 * calling thread.
1168 * @param cbMin The minimum number of bytes relative to offOpcode
1169 * that must be read.
1170 */
1171VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1172{
1173 /*
1174 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1175 *
1176 * First translate CS:rIP to a physical address.
1177 */
1178 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1179 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1180 uint8_t const cbLeft = cbOpcode - offOpcode;
1181 Assert(cbLeft < cbMin);
1182 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1183
1184 uint32_t cbToTryRead;
1185 RTGCPTR GCPtrNext;
1186 if (IEM_IS_64BIT_CODE(pVCpu))
1187 {
1188 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1189 if (!IEM_IS_CANONICAL(GCPtrNext))
1190 return iemRaiseGeneralProtectionFault0(pVCpu);
1191 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1192 }
1193 else
1194 {
1195 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1196 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1197 GCPtrNext32 += cbOpcode;
1198 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1199 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1200 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1201 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1202 if (!cbToTryRead) /* overflowed */
1203 {
1204 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1205 cbToTryRead = UINT32_MAX;
1206 /** @todo check out wrapping around the code segment. */
1207 }
1208 if (cbToTryRead < cbMin - cbLeft)
1209 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1210 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1211
1212 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1213 if (cbToTryRead > cbLeftOnPage)
1214 cbToTryRead = cbLeftOnPage;
1215 }
1216
1217 /* Restrict to opcode buffer space.
1218
1219 We're making ASSUMPTIONS here based on work done previously in
1220 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1221 be fetched in case of an instruction crossing two pages. */
1222 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1223 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1224 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1225 { /* likely */ }
1226 else
1227 {
1228 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1229 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1230 return iemRaiseGeneralProtectionFault0(pVCpu);
1231 }
1232
1233 PGMPTWALK Walk;
1234 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1235 if (RT_FAILURE(rc))
1236 {
1237 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1238#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1239 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1240 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1241#endif
1242 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1243 }
1244 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1245 {
1246 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1247#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1248 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1249 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1250#endif
1251 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1252 }
1253 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1254 {
1255 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1256#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1257 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1258 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1259#endif
1260 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1261 }
1262 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1263 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1264 /** @todo Check reserved bits and such stuff. PGM is better at doing
1265 * that, so do it when implementing the guest virtual address
1266 * TLB... */
1267
1268 /*
1269 * Read the bytes at this address.
1270 *
1271 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1272 * and since PATM should only patch the start of an instruction there
1273 * should be no need to check again here.
1274 */
1275 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1276 {
1277 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1278 cbToTryRead, PGMACCESSORIGIN_IEM);
1279 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1280 { /* likely */ }
1281 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1282 {
1283 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1284 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1285 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1286 }
1287 else
1288 {
1289 Log((RT_SUCCESS(rcStrict)
1290 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1291 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1292 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1293 return rcStrict;
1294 }
1295 }
1296 else
1297 {
1298 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1299 if (RT_SUCCESS(rc))
1300 { /* likely */ }
1301 else
1302 {
1303 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1304 return rc;
1305 }
1306 }
1307 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1308 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1309
1310 return VINF_SUCCESS;
1311}
1312
1313#endif /* !IEM_WITH_CODE_TLB */
1314#ifndef IEM_WITH_SETJMP
1315
1316/**
1317 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1318 *
1319 * @returns Strict VBox status code.
1320 * @param pVCpu The cross context virtual CPU structure of the
1321 * calling thread.
1322 * @param pb Where to return the opcode byte.
1323 */
1324VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1325{
1326 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1327 if (rcStrict == VINF_SUCCESS)
1328 {
1329 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1330 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1331 pVCpu->iem.s.offOpcode = offOpcode + 1;
1332 }
1333 else
1334 *pb = 0;
1335 return rcStrict;
1336}
1337
1338#else /* IEM_WITH_SETJMP */
1339
1340/**
1341 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1342 *
1343 * @returns The opcode byte.
1344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1345 */
1346uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1347{
1348# ifdef IEM_WITH_CODE_TLB
1349 uint8_t u8;
1350 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1351 return u8;
1352# else
1353 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1354 if (rcStrict == VINF_SUCCESS)
1355 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1356 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1357# endif
1358}
1359
1360#endif /* IEM_WITH_SETJMP */
1361
1362#ifndef IEM_WITH_SETJMP
1363
1364/**
1365 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1366 *
1367 * @returns Strict VBox status code.
1368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1369 * @param pu16 Where to return the opcode word.
1370 */
1371VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1372{
1373 uint8_t u8;
1374 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1375 if (rcStrict == VINF_SUCCESS)
1376 *pu16 = (int8_t)u8;
1377 return rcStrict;
1378}
1379
1380
1381/**
1382 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1383 *
1384 * @returns Strict VBox status code.
1385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1386 * @param pu32 Where to return the opcode dword.
1387 */
1388VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1389{
1390 uint8_t u8;
1391 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1392 if (rcStrict == VINF_SUCCESS)
1393 *pu32 = (int8_t)u8;
1394 return rcStrict;
1395}
1396
1397
1398/**
1399 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1403 * @param pu64 Where to return the opcode qword.
1404 */
1405VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1406{
1407 uint8_t u8;
1408 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1409 if (rcStrict == VINF_SUCCESS)
1410 *pu64 = (int8_t)u8;
1411 return rcStrict;
1412}
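/*
 * Worked example for the sign extensions above (illustrative only): an opcode
 * byte of 0xfe is interpreted as int8_t -2, so the widened results are 0xfffe,
 * 0xfffffffe and 0xfffffffffffffffe for the U16, U32 and U64 variants.
 */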
1413
1414#endif /* !IEM_WITH_SETJMP */
1415
1416
1417#ifndef IEM_WITH_SETJMP
1418
1419/**
1420 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1421 *
1422 * @returns Strict VBox status code.
1423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1424 * @param pu16 Where to return the opcode word.
1425 */
1426VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1427{
1428 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1429 if (rcStrict == VINF_SUCCESS)
1430 {
1431 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1432# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1433 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1434# else
1435 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1436# endif
1437 pVCpu->iem.s.offOpcode = offOpcode + 2;
1438 }
1439 else
1440 *pu16 = 0;
1441 return rcStrict;
1442}
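/*
 * Worked example for the byte assembly above (illustrative only): with
 * abOpcode[offOpcode] = 0x34 and abOpcode[offOpcode + 1] = 0x12, RT_MAKE_U16
 * yields 0x1234, i.e. the same little-endian result as the unaligned
 * uint16_t read used when IEM_USE_UNALIGNED_DATA_ACCESS is defined.
 */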
1443
1444#else /* IEM_WITH_SETJMP */
1445
1446/**
1447 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1448 *
1449 * @returns The opcode word.
1450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1451 */
1452uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1453{
1454# ifdef IEM_WITH_CODE_TLB
1455 uint16_t u16;
1456 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1457 return u16;
1458# else
1459 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1460 if (rcStrict == VINF_SUCCESS)
1461 {
1462 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1463 pVCpu->iem.s.offOpcode += 2;
1464# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1465 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1466# else
1467 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1468# endif
1469 }
1470 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1471# endif
1472}
1473
1474#endif /* IEM_WITH_SETJMP */
1475
1476#ifndef IEM_WITH_SETJMP
1477
1478/**
1479 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1480 *
1481 * @returns Strict VBox status code.
1482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1483 * @param pu32 Where to return the opcode double word.
1484 */
1485VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1486{
1487 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1491 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1492 pVCpu->iem.s.offOpcode = offOpcode + 2;
1493 }
1494 else
1495 *pu32 = 0;
1496 return rcStrict;
1497}
1498
1499
1500/**
1501 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1502 *
1503 * @returns Strict VBox status code.
1504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1505 * @param pu64 Where to return the opcode quad word.
1506 */
1507VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1508{
1509 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1510 if (rcStrict == VINF_SUCCESS)
1511 {
1512 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1513 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1514 pVCpu->iem.s.offOpcode = offOpcode + 2;
1515 }
1516 else
1517 *pu64 = 0;
1518 return rcStrict;
1519}
1520
1521#endif /* !IEM_WITH_SETJMP */
1522
1523#ifndef IEM_WITH_SETJMP
1524
1525/**
1526 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1527 *
1528 * @returns Strict VBox status code.
1529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1530 * @param pu32 Where to return the opcode dword.
1531 */
1532VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1533{
1534 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1535 if (rcStrict == VINF_SUCCESS)
1536 {
1537 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1538# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1539 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1540# else
1541 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1542 pVCpu->iem.s.abOpcode[offOpcode + 1],
1543 pVCpu->iem.s.abOpcode[offOpcode + 2],
1544 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1545# endif
1546 pVCpu->iem.s.offOpcode = offOpcode + 4;
1547 }
1548 else
1549 *pu32 = 0;
1550 return rcStrict;
1551}
1552
1553#else /* IEM_WITH_SETJMP */
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1557 *
1558 * @returns The opcode dword.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 */
1561uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1562{
1563# ifdef IEM_WITH_CODE_TLB
1564 uint32_t u32;
1565 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1566 return u32;
1567# else
1568 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1569 if (rcStrict == VINF_SUCCESS)
1570 {
1571 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1572 pVCpu->iem.s.offOpcode = offOpcode + 4;
1573# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1574 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1575# else
1576 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1577 pVCpu->iem.s.abOpcode[offOpcode + 1],
1578 pVCpu->iem.s.abOpcode[offOpcode + 2],
1579 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1580# endif
1581 }
1582 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1583# endif
1584}
1585
1586#endif /* IEM_WITH_SETJMP */
1587
1588#ifndef IEM_WITH_SETJMP
1589
1590/**
1591 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1592 *
1593 * @returns Strict VBox status code.
1594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1595 * @param pu64 Where to return the opcode dword.
1596 */
1597VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1598{
1599 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1600 if (rcStrict == VINF_SUCCESS)
1601 {
1602 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1603 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1604 pVCpu->iem.s.abOpcode[offOpcode + 1],
1605 pVCpu->iem.s.abOpcode[offOpcode + 2],
1606 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1607 pVCpu->iem.s.offOpcode = offOpcode + 4;
1608 }
1609 else
1610 *pu64 = 0;
1611 return rcStrict;
1612}
1613
1614
1615/**
1616 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1617 *
1618 * @returns Strict VBox status code.
1619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1620 * @param pu64 Where to return the opcode qword.
1621 */
1622VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1623{
1624 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1625 if (rcStrict == VINF_SUCCESS)
1626 {
1627 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1628 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1629 pVCpu->iem.s.abOpcode[offOpcode + 1],
1630 pVCpu->iem.s.abOpcode[offOpcode + 2],
1631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1632 pVCpu->iem.s.offOpcode = offOpcode + 4;
1633 }
1634 else
1635 *pu64 = 0;
1636 return rcStrict;
1637}
1638
1639#endif /* !IEM_WITH_SETJMP */
1640
1641#ifndef IEM_WITH_SETJMP
1642
1643/**
1644 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1645 *
1646 * @returns Strict VBox status code.
1647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1648 * @param pu64 Where to return the opcode qword.
1649 */
1650VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1651{
1652 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1653 if (rcStrict == VINF_SUCCESS)
1654 {
1655 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1656# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1657 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1658# else
1659 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1660 pVCpu->iem.s.abOpcode[offOpcode + 1],
1661 pVCpu->iem.s.abOpcode[offOpcode + 2],
1662 pVCpu->iem.s.abOpcode[offOpcode + 3],
1663 pVCpu->iem.s.abOpcode[offOpcode + 4],
1664 pVCpu->iem.s.abOpcode[offOpcode + 5],
1665 pVCpu->iem.s.abOpcode[offOpcode + 6],
1666 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1667# endif
1668 pVCpu->iem.s.offOpcode = offOpcode + 8;
1669 }
1670 else
1671 *pu64 = 0;
1672 return rcStrict;
1673}
1674
1675#else /* IEM_WITH_SETJMP */
1676
1677/**
1678 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1679 *
1680 * @returns The opcode qword.
1681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1682 */
1683uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1684{
1685# ifdef IEM_WITH_CODE_TLB
1686 uint64_t u64;
1687 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1688 return u64;
1689# else
1690 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1691 if (rcStrict == VINF_SUCCESS)
1692 {
1693 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1694 pVCpu->iem.s.offOpcode = offOpcode + 8;
1695# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1696 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1697# else
1698 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1699 pVCpu->iem.s.abOpcode[offOpcode + 1],
1700 pVCpu->iem.s.abOpcode[offOpcode + 2],
1701 pVCpu->iem.s.abOpcode[offOpcode + 3],
1702 pVCpu->iem.s.abOpcode[offOpcode + 4],
1703 pVCpu->iem.s.abOpcode[offOpcode + 5],
1704 pVCpu->iem.s.abOpcode[offOpcode + 6],
1705 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1706# endif
1707 }
1708 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1709# endif
1710}
1711
1712#endif /* IEM_WITH_SETJMP */
1713
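/*
 * Note on the byte-order handling in the slow fetchers above: when
 * IEM_USE_UNALIGNED_DATA_ACCESS is not available, the little-endian immediate
 * is reassembled from individual opcode bytes, the byte at abOpcode[offOpcode]
 * being the least significant one.  A small sketch with made-up byte values:
 *
 *      abOpcode[offOpcode..offOpcode+3] = { 0x78, 0x56, 0x34, 0x12 }
 *      RT_MAKE_U32_FROM_U8(0x78, 0x56, 0x34, 0x12) == UINT32_C(0x12345678)
 *
 * i.e. the same value an unaligned uint32_t read would produce on a
 * little-endian host.
 */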
1714
1715
1716/** @name Misc Worker Functions.
1717 * @{
1718 */
1719
1720/**
1721 * Gets the exception class for the specified exception vector.
1722 *
1723 * @returns The class of the specified exception.
1724 * @param uVector The exception vector.
1725 */
1726static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1727{
1728 Assert(uVector <= X86_XCPT_LAST);
1729 switch (uVector)
1730 {
1731 case X86_XCPT_DE:
1732 case X86_XCPT_TS:
1733 case X86_XCPT_NP:
1734 case X86_XCPT_SS:
1735 case X86_XCPT_GP:
1736 case X86_XCPT_SX: /* AMD only */
1737 return IEMXCPTCLASS_CONTRIBUTORY;
1738
1739 case X86_XCPT_PF:
1740 case X86_XCPT_VE: /* Intel only */
1741 return IEMXCPTCLASS_PAGE_FAULT;
1742
1743 case X86_XCPT_DF:
1744 return IEMXCPTCLASS_DOUBLE_FAULT;
1745 }
1746 return IEMXCPTCLASS_BENIGN;
1747}
1748
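/*
 * For reference, how the classes returned above combine when a second
 * exception is raised while delivering a first one (see the Intel SDM on the
 * double fault exception and IEMEvaluateRecursiveXcpt below):
 *
 *      first \ second  | benign | contributory | page fault
 *      ----------------+--------+--------------+-------------
 *      benign          | serial | serial       | serial
 *      contributory    | serial | #DF          | serial
 *      page fault      | serial | #DF          | #DF
 *      double fault    | serial | triple fault | triple fault
 *
 * NMI and recursive #AC get special treatment, see below.
 */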
1749
1750/**
1751 * Evaluates how to handle an exception caused during delivery of another event
1752 * (exception / interrupt).
1753 *
1754 * @returns How to handle the recursive exception.
1755 * @param pVCpu The cross context virtual CPU structure of the
1756 * calling thread.
1757 * @param fPrevFlags The flags of the previous event.
1758 * @param uPrevVector The vector of the previous event.
1759 * @param fCurFlags The flags of the current exception.
1760 * @param uCurVector The vector of the current exception.
1761 * @param pfXcptRaiseInfo Where to store additional information about the
1762 * exception condition. Optional.
1763 */
1764VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1765 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1766{
1767 /*
1768     * Only CPU exceptions can be raised while delivering other events; software-interrupt
1769     * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1770 */
1771 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1772 Assert(pVCpu); RT_NOREF(pVCpu);
1773 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1774
1775 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1776 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1777 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1778 {
1779 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1780 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1781 {
1782 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1783 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1784 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1785 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1786 {
1787 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1788 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1789 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1790 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1791 uCurVector, pVCpu->cpum.GstCtx.cr2));
1792 }
1793 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1794 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1795 {
1796 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1797 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1798 }
1799 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1800 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1801 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1802 {
1803 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1804 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1805 }
1806 }
1807 else
1808 {
1809 if (uPrevVector == X86_XCPT_NMI)
1810 {
1811 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1812 if (uCurVector == X86_XCPT_PF)
1813 {
1814 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1815 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1816 }
1817 }
1818 else if ( uPrevVector == X86_XCPT_AC
1819 && uCurVector == X86_XCPT_AC)
1820 {
1821 enmRaise = IEMXCPTRAISE_CPU_HANG;
1822 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1823 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1824 }
1825 }
1826 }
1827 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1828 {
1829 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1830 if (uCurVector == X86_XCPT_PF)
1831 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1832 }
1833 else
1834 {
1835 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1836 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1837 }
1838
1839 if (pfXcptRaiseInfo)
1840 *pfXcptRaiseInfo = fRaiseInfo;
1841 return enmRaise;
1842}
1843
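/*
 * Illustrative use of IEMEvaluateRecursiveXcpt (hypothetical caller, values
 * chosen for illustration only): a #GP raised while delivering a #NP is two
 * contributory exceptions, so the evaluation yields a double fault:
 *
 *      IEMXCPTRAISEINFO fRaiseInfo;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_NP,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                       &fRaiseInfo);
 *      Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 */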
1844
1845/**
1846 * Enters the CPU shutdown state initiated by a triple fault or other
1847 * unrecoverable conditions.
1848 *
1849 * @returns Strict VBox status code.
1850 * @param pVCpu The cross context virtual CPU structure of the
1851 * calling thread.
1852 */
1853static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1854{
1855 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1856 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1857
1858 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1859 {
1860 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1861 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1862 }
1863
1864 RT_NOREF(pVCpu);
1865 return VINF_EM_TRIPLE_FAULT;
1866}
1867
1868
1869/**
1870 * Validates a new SS segment.
1871 *
1872 * @returns VBox strict status code.
1873 * @param pVCpu The cross context virtual CPU structure of the
1874 * calling thread.
1875 * @param NewSS The new SS selector.
1876 * @param uCpl The CPL to load the stack for.
1877 * @param pDesc Where to return the descriptor.
1878 */
1879static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1880{
1881 /* Null selectors are not allowed (we're not called for dispatching
1882 interrupts with SS=0 in long mode). */
1883 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1884 {
1885         Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
1886 return iemRaiseTaskSwitchFault0(pVCpu);
1887 }
1888
1889 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1890 if ((NewSS & X86_SEL_RPL) != uCpl)
1891 {
1892         Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1893 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1894 }
1895
1896 /*
1897 * Read the descriptor.
1898 */
1899 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1900 if (rcStrict != VINF_SUCCESS)
1901 return rcStrict;
1902
1903 /*
1904 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1905 */
1906 if (!pDesc->Legacy.Gen.u1DescType)
1907 {
1908         Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1909 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1910 }
1911
1912 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1913 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1914 {
1915         Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1916 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1917 }
1918 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1919 {
1920         Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1921 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1922 }
1923
1924 /* Is it there? */
1925 /** @todo testcase: Is this checked before the canonical / limit check below? */
1926 if (!pDesc->Legacy.Gen.u1Present)
1927 {
1928         Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
1929 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1930 }
1931
1932 return VINF_SUCCESS;
1933}
1934
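/*
 * Typical use of iemMiscValidateNewSS (sketch only; uNewSS and uNewCpl stand
 * in for the caller's values): fetch and validate the descriptor for a stack
 * segment that is about to be loaded, propagating any strict status:
 *
 *      IEMSELDESC DescSS;
 *      VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, uNewSS, uNewCpl, &DescSS);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */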
1935/** @} */
1936
1937
1938/** @name Raising Exceptions.
1939 *
1940 * @{
1941 */
1942
1943
1944/**
1945 * Loads the specified stack far pointer from the TSS.
1946 *
1947 * @returns VBox strict status code.
1948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1949 * @param uCpl The CPL to load the stack for.
1950 * @param pSelSS Where to return the new stack segment.
1951 * @param puEsp Where to return the new stack pointer.
1952 */
1953static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1954{
1955 VBOXSTRICTRC rcStrict;
1956 Assert(uCpl < 4);
1957
1958 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1959 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1960 {
1961 /*
1962 * 16-bit TSS (X86TSS16).
1963 */
1964 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1965 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1966 {
1967 uint32_t off = uCpl * 4 + 2;
1968 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1969 {
1970 /** @todo check actual access pattern here. */
1971 uint32_t u32Tmp = 0; /* gcc maybe... */
1972 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1973 if (rcStrict == VINF_SUCCESS)
1974 {
1975 *puEsp = RT_LOWORD(u32Tmp);
1976 *pSelSS = RT_HIWORD(u32Tmp);
1977 return VINF_SUCCESS;
1978 }
1979 }
1980 else
1981 {
1982 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1983 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1984 }
1985 break;
1986 }
1987
1988 /*
1989 * 32-bit TSS (X86TSS32).
1990 */
1991 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1992 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1993 {
1994 uint32_t off = uCpl * 8 + 4;
1995 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1996 {
1997/** @todo check actual access pattern here. */
1998 uint64_t u64Tmp;
1999 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2000 if (rcStrict == VINF_SUCCESS)
2001 {
2002 *puEsp = u64Tmp & UINT32_MAX;
2003 *pSelSS = (RTSEL)(u64Tmp >> 32);
2004 return VINF_SUCCESS;
2005 }
2006 }
2007 else
2008 {
2009                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2010 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2011 }
2012 break;
2013 }
2014
2015 default:
2016 AssertFailed();
2017 rcStrict = VERR_IEM_IPE_4;
2018 break;
2019 }
2020
2021 *puEsp = 0; /* make gcc happy */
2022 *pSelSS = 0; /* make gcc happy */
2023 return rcStrict;
2024}
2025
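/*
 * Worked example for the offset calculations above (uCpl = 1, offsets per the
 * architectural X86TSS16 / X86TSS32 layouts):
 *
 *      16-bit TSS:  off = 1 * 4 + 2 =  6  -> sp1 at 6, ss1 at 8 (a word each)
 *      32-bit TSS:  off = 1 * 8 + 4 = 12  -> esp1 at 12 (dword), ss1 at 16 (word + padding)
 *
 * which is why the 16-bit path splits a 32-bit read with RT_LOWORD/RT_HIWORD
 * and the 32-bit path splits a 64-bit read into its low and high halves.
 */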
2026
2027/**
2028 * Loads the specified stack pointer from the 64-bit TSS.
2029 *
2030 * @returns VBox strict status code.
2031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2032 * @param uCpl The CPL to load the stack for.
2033 * @param uIst The interrupt stack table index, 0 to use uCpl instead.
2034 * @param puRsp Where to return the new stack pointer.
2035 */
2036static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2037{
2038 Assert(uCpl < 4);
2039 Assert(uIst < 8);
2040 *puRsp = 0; /* make gcc happy */
2041
2042 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2043 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2044
2045 uint32_t off;
2046 if (uIst)
2047 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2048 else
2049 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2050 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2051 {
2052 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2053 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2054 }
2055
2056 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2057}
2058
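/*
 * Worked example for the offset calculation above (offsets per the
 * architectural 64-bit TSS layout, X86TSS64):
 *
 *      uIst = 0, uCpl = 1:  off = 1 * 8 + RT_UOFFSETOF(X86TSS64, rsp0) =  8 +  4 = 12 (0x0c) -> rsp1
 *      uIst = 3:            off = 2 * 8 + RT_UOFFSETOF(X86TSS64, ist1) = 16 + 36 = 52 (0x34) -> ist3
 */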
2059
2060/**
2061 * Adjust the CPU state according to the exception being raised.
2062 *
2063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2064 * @param u8Vector The exception that has been raised.
2065 */
2066DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2067{
2068 switch (u8Vector)
2069 {
2070 case X86_XCPT_DB:
2071 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2072 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2073 break;
2074 /** @todo Read the AMD and Intel exception reference... */
2075 }
2076}
2077
2078
2079/**
2080 * Implements exceptions and interrupts for real mode.
2081 *
2082 * @returns VBox strict status code.
2083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2084 * @param cbInstr The number of bytes to offset rIP by in the return
2085 * address.
2086 * @param u8Vector The interrupt / exception vector number.
2087 * @param fFlags The flags.
2088 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2089 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2090 */
2091static VBOXSTRICTRC
2092iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2093 uint8_t cbInstr,
2094 uint8_t u8Vector,
2095 uint32_t fFlags,
2096 uint16_t uErr,
2097 uint64_t uCr2) RT_NOEXCEPT
2098{
2099 NOREF(uErr); NOREF(uCr2);
2100 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2101
2102 /*
2103 * Read the IDT entry.
2104 */
2105 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2106 {
2107 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2108 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2109 }
2110 RTFAR16 Idte;
2111 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2112 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2113 {
2114 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2115 return rcStrict;
2116 }
2117
2118#ifdef LOG_ENABLED
2119 /* If software interrupt, try decode it if logging is enabled and such. */
2120 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2121 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2122 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2123#endif
2124
2125 /*
2126 * Push the stack frame.
2127 */
2128 uint8_t bUnmapInfo;
2129 uint16_t *pu16Frame;
2130 uint64_t uNewRsp;
2131 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2132 if (rcStrict != VINF_SUCCESS)
2133 return rcStrict;
2134
2135 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2136#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2137 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2138 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2139 fEfl |= UINT16_C(0xf000);
2140#endif
2141 pu16Frame[2] = (uint16_t)fEfl;
2142 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2143 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2144 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2145 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2146 return rcStrict;
2147
2148 /*
2149 * Load the vector address into cs:ip and make exception specific state
2150 * adjustments.
2151 */
2152 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2153 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2154 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2155 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2156 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2157 pVCpu->cpum.GstCtx.rip = Idte.off;
2158 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2159 IEMMISC_SET_EFL(pVCpu, fEfl);
2160
2161 /** @todo do we actually do this in real mode? */
2162 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2163 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2164
2165     /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2166 so best leave them alone in case we're in a weird kind of real mode... */
2167
2168 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2169}
2170
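/*
 * For reference, the real-mode IVT/IDT entry fetched above is just a 16:16 far
 * pointer, 4 bytes per vector, e.g. (hypothetical vector number):
 *
 *      vector 0x21  ->  entry at idtr.pIdt + 0x21 * 4 = idtr.pIdt + 0x84,
 *                       low word = new IP, high word = new CS
 *
 * and the 6-byte frame pushed above ends up on the stack as IP (at the lowest
 * address, i.e. the top of the stack), then CS, then FLAGS.
 */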
2171
2172/**
2173 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2174 *
2175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2176 * @param pSReg Pointer to the segment register.
2177 */
2178DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2179{
2180 pSReg->Sel = 0;
2181 pSReg->ValidSel = 0;
2182 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2183 {
2184         /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes */
2185 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2186 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2187 }
2188 else
2189 {
2190 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2191 /** @todo check this on AMD-V */
2192 pSReg->u64Base = 0;
2193 pSReg->u32Limit = 0;
2194 }
2195}
2196
2197
2198/**
2199 * Loads a segment selector during a task switch in V8086 mode.
2200 *
2201 * @param pSReg Pointer to the segment register.
2202 * @param uSel The selector value to load.
2203 */
2204DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2205{
2206 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2207 pSReg->Sel = uSel;
2208 pSReg->ValidSel = uSel;
2209 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2210 pSReg->u64Base = uSel << 4;
2211 pSReg->u32Limit = 0xffff;
2212 pSReg->Attr.u = 0xf3;
2213}
2214
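/*
 * Illustrative example of the V8086 segment arithmetic above (hypothetical
 * selector value): uSel = 0x1234 gives u64Base = 0x1234 << 4 = 0x12340, with a
 * fixed 64 KiB limit and attributes 0xf3 (present, DPL 3, read/write data,
 * accessed).
 */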
2215
2216/**
2217 * Loads a segment selector during a task switch in protected mode.
2218 *
2219 * In this task switch scenario, we would throw \#TS exceptions rather than
2220 * \#GPs.
2221 *
2222 * @returns VBox strict status code.
2223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2224 * @param pSReg Pointer to the segment register.
2225 * @param uSel The new selector value.
2226 *
2227 * @remarks This does _not_ handle CS or SS.
2228 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2229 */
2230static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2231{
2232 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2233
2234 /* Null data selector. */
2235 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2236 {
2237 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2239 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2240 return VINF_SUCCESS;
2241 }
2242
2243 /* Fetch the descriptor. */
2244 IEMSELDESC Desc;
2245 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2246 if (rcStrict != VINF_SUCCESS)
2247 {
2248 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2249 VBOXSTRICTRC_VAL(rcStrict)));
2250 return rcStrict;
2251 }
2252
2253 /* Must be a data segment or readable code segment. */
2254 if ( !Desc.Legacy.Gen.u1DescType
2255 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2256 {
2257 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2258 Desc.Legacy.Gen.u4Type));
2259 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2260 }
2261
2262 /* Check privileges for data segments and non-conforming code segments. */
2263 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2264 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2265 {
2266 /* The RPL and the new CPL must be less than or equal to the DPL. */
2267 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2268 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2269 {
2270 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2271 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2272 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2273 }
2274 }
2275
2276 /* Is it there? */
2277 if (!Desc.Legacy.Gen.u1Present)
2278 {
2279 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2280 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2281 }
2282
2283 /* The base and limit. */
2284 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2285 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2286
2287 /*
2288 * Ok, everything checked out fine. Now set the accessed bit before
2289 * committing the result into the registers.
2290 */
2291 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2292 {
2293 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2294 if (rcStrict != VINF_SUCCESS)
2295 return rcStrict;
2296 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2297 }
2298
2299 /* Commit */
2300 pSReg->Sel = uSel;
2301 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2302 pSReg->u32Limit = cbLimit;
2303 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2304 pSReg->ValidSel = uSel;
2305 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2306 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2307 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2308
2309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2310 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2311 return VINF_SUCCESS;
2312}
2313
2314
2315/**
2316 * Performs a task switch.
2317 *
2318 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2319 * caller is responsible for performing the necessary checks (like DPL, TSS
2320 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2321 * reference for JMP, CALL, IRET.
2322 *
2323 * If the task switch is due to a software interrupt or hardware exception,
2324 * the caller is responsible for validating the TSS selector and descriptor. See
2325 * Intel Instruction reference for INT n.
2326 *
2327 * @returns VBox strict status code.
2328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2329 * @param enmTaskSwitch The cause of the task switch.
2330 * @param uNextEip The EIP effective after the task switch.
2331 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2332 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2333 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2334 * @param SelTss The TSS selector of the new task.
2335 * @param pNewDescTss Pointer to the new TSS descriptor.
2336 */
2337VBOXSTRICTRC
2338iemTaskSwitch(PVMCPUCC pVCpu,
2339 IEMTASKSWITCH enmTaskSwitch,
2340 uint32_t uNextEip,
2341 uint32_t fFlags,
2342 uint16_t uErr,
2343 uint64_t uCr2,
2344 RTSEL SelTss,
2345 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2346{
2347 Assert(!IEM_IS_REAL_MODE(pVCpu));
2348 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2349 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2350
2351 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2352 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2353 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2354 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2355 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2356
2357 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2358 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2359
2360 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2361 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2362
2363 /* Update CR2 in case it's a page-fault. */
2364 /** @todo This should probably be done much earlier in IEM/PGM. See
2365 * @bugref{5653#c49}. */
2366 if (fFlags & IEM_XCPT_FLAGS_CR2)
2367 pVCpu->cpum.GstCtx.cr2 = uCr2;
2368
2369 /*
2370 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2371 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2372 */
2373 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2374 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2375 if (uNewTssLimit < uNewTssLimitMin)
2376 {
2377 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2378 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2379 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2380 }
2381
2382 /*
2383     * Task switches in VMX non-root mode always cause task-switch VM-exits.
2384 * The new TSS must have been read and validated (DPL, limits etc.) before a
2385 * task-switch VM-exit commences.
2386 *
2387 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2388 */
2389 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2390 {
2391 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2392 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2393 }
2394
2395 /*
2396 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2397 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2398 */
2399 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2400 {
2401 uint64_t const uExitInfo1 = SelTss;
2402 uint64_t uExitInfo2 = uErr;
2403 switch (enmTaskSwitch)
2404 {
2405 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2406 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2407 default: break;
2408 }
2409 if (fFlags & IEM_XCPT_FLAGS_ERR)
2410 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2411 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2412 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2413
2414 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2415 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2416 RT_NOREF2(uExitInfo1, uExitInfo2);
2417 }
2418
2419 /*
2420 * Check the current TSS limit. The last written byte to the current TSS during the
2421 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2422 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2423 *
2424     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2425 * end up with smaller than "legal" TSS limits.
2426 */
2427 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2428 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2429 if (uCurTssLimit < uCurTssLimitMin)
2430 {
2431 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2432 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2433 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2434 }
2435
2436 /*
2437 * Verify that the new TSS can be accessed and map it. Map only the required contents
2438 * and not the entire TSS.
2439 */
2440 uint8_t bUnmapInfoNewTss;
2441 void *pvNewTss;
2442 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2443 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2444 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2445 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2446 * not perform correct translation if this happens. See Intel spec. 7.2.1
2447 * "Task-State Segment". */
2448 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2449/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2450 * Consider wrapping the remainder into a function for simpler cleanup. */
2451 if (rcStrict != VINF_SUCCESS)
2452 {
2453 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2454 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2455 return rcStrict;
2456 }
2457
2458 /*
2459 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2460 */
2461 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2462 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2463 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2464 {
2465 uint8_t bUnmapInfoDescCurTss;
2466 PX86DESC pDescCurTss;
2467 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2468 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2469 if (rcStrict != VINF_SUCCESS)
2470 {
2471 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2472 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2473 return rcStrict;
2474 }
2475
2476 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2477 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2478 if (rcStrict != VINF_SUCCESS)
2479 {
2480 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2481 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2482 return rcStrict;
2483 }
2484
2485 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2486 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2487 {
2488 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2489 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2490 fEFlags &= ~X86_EFL_NT;
2491 }
2492 }
2493
2494 /*
2495 * Save the CPU state into the current TSS.
2496 */
2497 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2498 if (GCPtrNewTss == GCPtrCurTss)
2499 {
2500 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2501 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2502 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2503 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2504 pVCpu->cpum.GstCtx.ldtr.Sel));
2505 }
2506 if (fIsNewTss386)
2507 {
2508 /*
2509 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2510 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2511 */
2512 uint8_t bUnmapInfoCurTss32;
2513 void *pvCurTss32;
2514 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2515 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2516 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2517 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2518 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2519 if (rcStrict != VINF_SUCCESS)
2520 {
2521 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2522 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2523 return rcStrict;
2524 }
2525
2526         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2527 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2528 pCurTss32->eip = uNextEip;
2529 pCurTss32->eflags = fEFlags;
2530 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2531 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2532 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2533 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2534 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2535 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2536 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2537 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2538 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2539 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2540 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2541 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2542 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2543 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2544
2545 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2546 if (rcStrict != VINF_SUCCESS)
2547 {
2548 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2549 VBOXSTRICTRC_VAL(rcStrict)));
2550 return rcStrict;
2551 }
2552 }
2553 else
2554 {
2555 /*
2556 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2557 */
2558 uint8_t bUnmapInfoCurTss16;
2559 void *pvCurTss16;
2560 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2561 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2562 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2563 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2564 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2565 if (rcStrict != VINF_SUCCESS)
2566 {
2567 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2568 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2569 return rcStrict;
2570 }
2571
2572         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2573 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2574 pCurTss16->ip = uNextEip;
2575 pCurTss16->flags = (uint16_t)fEFlags;
2576 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2577 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2578 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2579 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2580 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2581 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2582 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2583 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2584 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2585 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2586 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2587 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2588
2589 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2590 if (rcStrict != VINF_SUCCESS)
2591 {
2592 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2593 VBOXSTRICTRC_VAL(rcStrict)));
2594 return rcStrict;
2595 }
2596 }
2597
2598 /*
2599 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2600 */
2601 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2602 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2603 {
2604 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2605 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2606 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2607 }
2608
2609 /*
2610     * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
2611     * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2612 */
2613 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2614 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2615 bool fNewDebugTrap;
2616 if (fIsNewTss386)
2617 {
2618 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2619 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2620 uNewEip = pNewTss32->eip;
2621 uNewEflags = pNewTss32->eflags;
2622 uNewEax = pNewTss32->eax;
2623 uNewEcx = pNewTss32->ecx;
2624 uNewEdx = pNewTss32->edx;
2625 uNewEbx = pNewTss32->ebx;
2626 uNewEsp = pNewTss32->esp;
2627 uNewEbp = pNewTss32->ebp;
2628 uNewEsi = pNewTss32->esi;
2629 uNewEdi = pNewTss32->edi;
2630 uNewES = pNewTss32->es;
2631 uNewCS = pNewTss32->cs;
2632 uNewSS = pNewTss32->ss;
2633 uNewDS = pNewTss32->ds;
2634 uNewFS = pNewTss32->fs;
2635 uNewGS = pNewTss32->gs;
2636 uNewLdt = pNewTss32->selLdt;
2637 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2638 }
2639 else
2640 {
2641 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2642 uNewCr3 = 0;
2643 uNewEip = pNewTss16->ip;
2644 uNewEflags = pNewTss16->flags;
2645 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2646 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2647 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2648 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2649 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2650 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2651 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2652 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2653 uNewES = pNewTss16->es;
2654 uNewCS = pNewTss16->cs;
2655 uNewSS = pNewTss16->ss;
2656 uNewDS = pNewTss16->ds;
2657 uNewFS = 0;
2658 uNewGS = 0;
2659 uNewLdt = pNewTss16->selLdt;
2660 fNewDebugTrap = false;
2661 }
2662
2663 if (GCPtrNewTss == GCPtrCurTss)
2664 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2665 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2666
2667 /*
2668 * We're done accessing the new TSS.
2669 */
2670 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2671 if (rcStrict != VINF_SUCCESS)
2672 {
2673 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2674 return rcStrict;
2675 }
2676
2677 /*
2678 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2679 */
2680 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2681 {
2682 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2683 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2684 if (rcStrict != VINF_SUCCESS)
2685 {
2686 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2687 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2688 return rcStrict;
2689 }
2690
2691 /* Check that the descriptor indicates the new TSS is available (not busy). */
2692 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2693 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2694 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2695
2696 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2697 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2698 if (rcStrict != VINF_SUCCESS)
2699 {
2700 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2701 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2702 return rcStrict;
2703 }
2704 }
2705
2706 /*
2707     * From this point on, we're technically in the new task.  Exceptions raised from here on are considered
2708     * to occur after the task switch has completed but before any instruction of the new task executes.
2709 */
2710 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2711 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2712 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2713 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2714 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2715 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2716 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2717
2718 /* Set the busy bit in TR. */
2719 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2720
2721 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2722 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2723 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2724 {
2725 uNewEflags |= X86_EFL_NT;
2726 }
2727
2728 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2729 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2730 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2731
2732 pVCpu->cpum.GstCtx.eip = uNewEip;
2733 pVCpu->cpum.GstCtx.eax = uNewEax;
2734 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2735 pVCpu->cpum.GstCtx.edx = uNewEdx;
2736 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2737 pVCpu->cpum.GstCtx.esp = uNewEsp;
2738 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2739 pVCpu->cpum.GstCtx.esi = uNewEsi;
2740 pVCpu->cpum.GstCtx.edi = uNewEdi;
2741
2742 uNewEflags &= X86_EFL_LIVE_MASK;
2743 uNewEflags |= X86_EFL_RA1_MASK;
2744 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2745
2746 /*
2747 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2748 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2749 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2750 */
2751 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2752 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2753
2754 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2755 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2756
2757 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2758 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2759
2760 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2761 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2762
2763 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2764 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2765
2766 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2767 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2768 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2769
2770 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2771 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2772 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2773 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2774
2775 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2776 {
2777 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2778 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2779 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2780 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2781 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2782 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2783 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2784 }
2785
2786 /*
2787 * Switch CR3 for the new task.
2788 */
2789 if ( fIsNewTss386
2790 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2791 {
2792 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2793 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2794 AssertRCSuccessReturn(rc, rc);
2795
2796 /* Inform PGM. */
2797 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2798 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2799 AssertRCReturn(rc, rc);
2800 /* ignore informational status codes */
2801
2802 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2803 }
2804
2805 /*
2806 * Switch LDTR for the new task.
2807 */
2808 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2809 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2810 else
2811 {
2812 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2813
2814 IEMSELDESC DescNewLdt;
2815 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2816 if (rcStrict != VINF_SUCCESS)
2817 {
2818 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2819 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2820 return rcStrict;
2821 }
2822 if ( !DescNewLdt.Legacy.Gen.u1Present
2823 || DescNewLdt.Legacy.Gen.u1DescType
2824 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2825 {
2826 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2827 uNewLdt, DescNewLdt.Legacy.u));
2828 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2829 }
2830
2831 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2832 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2833 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2834 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2835 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2836 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2837 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2839 }
2840
2841 IEMSELDESC DescSS;
2842 if (IEM_IS_V86_MODE(pVCpu))
2843 {
2844 IEM_SET_CPL(pVCpu, 3);
2845 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2846 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2847 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2848 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2849 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2850 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2851
2852 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2853 DescSS.Legacy.u = 0;
2854 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2855 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2856 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2857 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2858 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2859 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2860 DescSS.Legacy.Gen.u2Dpl = 3;
2861 }
2862 else
2863 {
2864 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2865
2866 /*
2867 * Load the stack segment for the new task.
2868 */
2869 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2870 {
2871 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2872 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2873 }
2874
2875 /* Fetch the descriptor. */
2876 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2877 if (rcStrict != VINF_SUCCESS)
2878 {
2879 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2880 VBOXSTRICTRC_VAL(rcStrict)));
2881 return rcStrict;
2882 }
2883
2884 /* SS must be a data segment and writable. */
2885 if ( !DescSS.Legacy.Gen.u1DescType
2886 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2887 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2888 {
2889 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2890 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2891 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2892 }
2893
2894 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2895 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2896 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2897 {
2898 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2899 uNewCpl));
2900 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2901 }
2902
2903 /* Is it there? */
2904 if (!DescSS.Legacy.Gen.u1Present)
2905 {
2906 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2907 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2908 }
2909
2910 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2911 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2912
2913 /* Set the accessed bit before committing the result into SS. */
2914 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2915 {
2916 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2917 if (rcStrict != VINF_SUCCESS)
2918 return rcStrict;
2919 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2920 }
2921
2922 /* Commit SS. */
2923 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2924 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2925 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2926 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2927 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2928 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2929 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2930
2931 /* CPL has changed, update IEM before loading rest of segments. */
2932 IEM_SET_CPL(pVCpu, uNewCpl);
2933
2934 /*
2935 * Load the data segments for the new task.
2936 */
2937 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2938 if (rcStrict != VINF_SUCCESS)
2939 return rcStrict;
2940 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2941 if (rcStrict != VINF_SUCCESS)
2942 return rcStrict;
2943 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2944 if (rcStrict != VINF_SUCCESS)
2945 return rcStrict;
2946 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2947 if (rcStrict != VINF_SUCCESS)
2948 return rcStrict;
2949
2950 /*
2951 * Load the code segment for the new task.
2952 */
2953 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2954 {
2955 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2956 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2957 }
2958
2959 /* Fetch the descriptor. */
2960 IEMSELDESC DescCS;
2961 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2962 if (rcStrict != VINF_SUCCESS)
2963 {
2964 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2965 return rcStrict;
2966 }
2967
2968 /* CS must be a code segment. */
2969 if ( !DescCS.Legacy.Gen.u1DescType
2970 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2971 {
2972 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2973 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2974 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2975 }
2976
2977 /* For conforming CS, DPL must be less than or equal to the RPL. */
2978 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2979 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2980 {
2981             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2982 DescCS.Legacy.Gen.u2Dpl));
2983 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2984 }
2985
2986 /* For non-conforming CS, DPL must match RPL. */
2987 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2988 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2989 {
2990             Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2991 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2992 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2993 }
2994
2995 /* Is it there? */
2996 if (!DescCS.Legacy.Gen.u1Present)
2997 {
2998 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2999 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3000 }
3001
3002 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3003 u64Base = X86DESC_BASE(&DescCS.Legacy);
3004
3005 /* Set the accessed bit before committing the result into CS. */
3006 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3007 {
3008 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3009 if (rcStrict != VINF_SUCCESS)
3010 return rcStrict;
3011 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3012 }
3013
3014 /* Commit CS. */
3015 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3016 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3017 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3018 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3019 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3020 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3022 }
3023
3024 /* Make sure the CPU mode is correct. */
3025 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3026 if (fExecNew != pVCpu->iem.s.fExec)
3027 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3028 pVCpu->iem.s.fExec = fExecNew;
3029
3030 /** @todo Debug trap. */
3031 if (fIsNewTss386 && fNewDebugTrap)
3032 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3033
3034 /*
3035 * Construct the error code masks based on what caused this task switch.
3036 * See Intel Instruction reference for INT.
3037 */
3038 uint16_t uExt;
3039 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3040 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3041 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3042 uExt = 1;
3043 else
3044 uExt = 0;
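/* The error codes raised further down consist solely of this EXT bit; in the
   architectural selector error code layout EXT is bit 0, IDT is bit 1, TI is
   bit 2 and the descriptor index occupies bits 3..15 (Intel SDM, "Error Code"). */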
3045
3046 /*
3047 * Push any error code on to the new stack.
3048 */
3049 if (fFlags & IEM_XCPT_FLAGS_ERR)
3050 {
3051 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3052 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3053 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
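/* Only the error code is pushed here: 4 bytes on a 32-bit TSS stack, 2 bytes on a 16-bit one. */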
3054
3055 /* Check that there is sufficient space on the stack. */
3056 /** @todo Factor out segment limit checking for normal/expand down segments
3057 * into a separate function. */
3058 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3059 {
3060 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3061 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3062 {
3063 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3064 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3065 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3066 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3067 }
3068 }
3069 else
3070 {
3071 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3072 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3073 {
3074 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3075 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3076 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3077 }
3078 }
3079
3080
3081 if (fIsNewTss386)
3082 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3083 else
3084 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3085 if (rcStrict != VINF_SUCCESS)
3086 {
3087 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3088 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3089 return rcStrict;
3090 }
3091 }
3092
3093 /* Check the new EIP against the new CS limit. */
3094 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3095 {
3096 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3097 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3098 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3099 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3100 }
3101
3102 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3103 pVCpu->cpum.GstCtx.ss.Sel));
3104 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3105}
3106
3107
3108/**
3109 * Implements exceptions and interrupts for protected mode.
3110 *
3111 * @returns VBox strict status code.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 * @param cbInstr The number of bytes to offset rIP by in the return
3114 * address.
3115 * @param u8Vector The interrupt / exception vector number.
3116 * @param fFlags The flags.
3117 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3118 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3119 */
3120static VBOXSTRICTRC
3121iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3122 uint8_t cbInstr,
3123 uint8_t u8Vector,
3124 uint32_t fFlags,
3125 uint16_t uErr,
3126 uint64_t uCr2) RT_NOEXCEPT
3127{
3128 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3129
3130 /*
3131 * Read the IDT entry.
3132 */
3133 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3134 {
3135 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3136 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3137 }
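/* (Protected-mode IDT gates are 8 bytes each and IDTR.limit is inclusive,
   hence the '8 * vector + 7' bound checked above.) */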
3138 X86DESC Idte;
3139 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3140 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3141 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3142 {
3143 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3144 return rcStrict;
3145 }
3146 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3147 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3148 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3149 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3150
3151 /*
3152 * Check the descriptor type, DPL and such.
3153 * ASSUMES this is done in the same order as described for call-gate calls.
3154 */
3155 if (Idte.Gate.u1DescType)
3156 {
3157 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3158 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3159 }
3160 bool fTaskGate = false;
3161 uint8_t f32BitGate = true;
3162 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3163 switch (Idte.Gate.u4Type)
3164 {
3165 case X86_SEL_TYPE_SYS_UNDEFINED:
3166 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3167 case X86_SEL_TYPE_SYS_LDT:
3168 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3169 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3170 case X86_SEL_TYPE_SYS_UNDEFINED2:
3171 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3172 case X86_SEL_TYPE_SYS_UNDEFINED3:
3173 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3174 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3175 case X86_SEL_TYPE_SYS_UNDEFINED4:
3176 {
3177 /** @todo check what actually happens when the type is wrong...
3178 * esp. call gates. */
3179 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3180 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3181 }
3182
3183 case X86_SEL_TYPE_SYS_286_INT_GATE:
3184 f32BitGate = false;
3185 RT_FALL_THRU();
3186 case X86_SEL_TYPE_SYS_386_INT_GATE:
3187 fEflToClear |= X86_EFL_IF;
3188 break;
3189
3190 case X86_SEL_TYPE_SYS_TASK_GATE:
3191 fTaskGate = true;
3192#ifndef IEM_IMPLEMENTS_TASKSWITCH
3193 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3194#endif
3195 break;
3196
3197 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3198 f32BitGate = false;
3199 break;
3200 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3201 break;
3202
3203 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3204 }
3205
3206 /* Check DPL against CPL if applicable. */
3207 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3208 {
3209 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3210 {
3211 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3212 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3213 }
3214 }
3215
3216 /* Is it there? */
3217 if (!Idte.Gate.u1Present)
3218 {
3219 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3220 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3221 }
3222
3223 /* Is it a task-gate? */
3224 if (fTaskGate)
3225 {
3226 /*
3227 * Construct the error code masks based on what caused this task switch.
3228 * See Intel Instruction reference for INT.
3229 */
3230 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3231 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3232 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3233 RTSEL SelTss = Idte.Gate.u16Sel;
3234
3235 /*
3236 * Fetch the TSS descriptor in the GDT.
3237 */
3238 IEMSELDESC DescTSS;
3239 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3240 if (rcStrict != VINF_SUCCESS)
3241 {
3242 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3243 VBOXSTRICTRC_VAL(rcStrict)));
3244 return rcStrict;
3245 }
3246
3247 /* The TSS descriptor must be a system segment and be available (not busy). */
3248 if ( DescTSS.Legacy.Gen.u1DescType
3249 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3250 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3251 {
3252 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3253 u8Vector, SelTss, DescTSS.Legacy.au64));
3254 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3255 }
3256
3257 /* The TSS must be present. */
3258 if (!DescTSS.Legacy.Gen.u1Present)
3259 {
3260 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3261 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3262 }
3263
3264 /* Do the actual task switch. */
3265 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3266 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3267 fFlags, uErr, uCr2, SelTss, &DescTSS);
3268 }
3269
3270 /* A null CS is bad. */
3271 RTSEL NewCS = Idte.Gate.u16Sel;
3272 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3273 {
3274 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3275 return iemRaiseGeneralProtectionFault0(pVCpu);
3276 }
3277
3278 /* Fetch the descriptor for the new CS. */
3279 IEMSELDESC DescCS;
3280 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3281 if (rcStrict != VINF_SUCCESS)
3282 {
3283 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3284 return rcStrict;
3285 }
3286
3287 /* Must be a code segment. */
3288 if (!DescCS.Legacy.Gen.u1DescType)
3289 {
3290 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3291 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3292 }
3293 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3294 {
3295 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3296 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3297 }
3298
3299 /* Don't allow lowering the privilege level. */
3300 /** @todo Does the lowering of privileges apply to software interrupts
3301 * only? This has bearings on the more-privileged or
3302 * same-privilege stack behavior further down. A testcase would
3303 * be nice. */
3304 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3305 {
3306 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3307 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3308 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3309 }
3310
3311 /* Make sure the selector is present. */
3312 if (!DescCS.Legacy.Gen.u1Present)
3313 {
3314 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3315 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3316 }
3317
3318#ifdef LOG_ENABLED
3319 /* If software interrupt, try decode it if logging is enabled and such. */
3320 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3321 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3322 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3323#endif
3324
3325 /* Check the new EIP against the new CS limit. */
3326 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3327 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3328 ? Idte.Gate.u16OffsetLow
3329 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
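/* 286 (16-bit) gates only supply a 16-bit offset; 386 gates combine the low
   and high offset words into a full 32-bit entry point. */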
3330 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3331 if (uNewEip > cbLimitCS)
3332 {
3333 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3334 u8Vector, uNewEip, cbLimitCS, NewCS));
3335 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3336 }
3337 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3338
3339 /* Calc the flag image to push. */
3340 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3341 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3342 fEfl &= ~X86_EFL_RF;
3343 else
3344 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3345
3346 /* From V8086 mode only go to CPL 0. */
3347 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3348 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
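/* A conforming handler CS keeps the caller's CPL, while a non-conforming one
   runs at its own DPL (which the DPL > CPL check above has already bounded). */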
3349 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3350 {
3351 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3352 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3353 }
3354
3355 /*
3356 * If the privilege level changes, we need to get a new stack from the TSS.
3357 * This in turns means validating the new SS and ESP...
3358 */
3359 if (uNewCpl != IEM_GET_CPL(pVCpu))
3360 {
3361 RTSEL NewSS;
3362 uint32_t uNewEsp;
3363 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3364 if (rcStrict != VINF_SUCCESS)
3365 return rcStrict;
3366
3367 IEMSELDESC DescSS;
3368 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3369 if (rcStrict != VINF_SUCCESS)
3370 return rcStrict;
3371 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3372 if (!DescSS.Legacy.Gen.u1DefBig)
3373 {
3374 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3375 uNewEsp = (uint16_t)uNewEsp;
3376 }
3377
3378 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3379
3380 /* Check that there is sufficient space for the stack frame. */
3381 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3382 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3383 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3384 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
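/* Sizes produced by the expression above: a plain inner-privilege frame holds
   (E)IP, CS, (E)FLAGS, the old (E)SP and the old SS, i.e. 10 bytes via a 16-bit
   gate or 20 via a 32-bit gate, plus 2/4 for an error code; when interrupting
   V8086 code, ES, DS, FS and GS are saved as well, giving 18/36 bytes
   (20/40 with an error code). */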
3385
3386 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3387 {
3388 if ( uNewEsp - 1 > cbLimitSS
3389 || uNewEsp < cbStackFrame)
3390 {
3391 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3392 u8Vector, NewSS, uNewEsp, cbStackFrame));
3393 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3394 }
3395 }
3396 else
3397 {
3398 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3399 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3400 {
3401 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3402 u8Vector, NewSS, uNewEsp, cbStackFrame));
3403 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3404 }
3405 }
3406
3407 /*
3408 * Start making changes.
3409 */
3410
3411 /* Set the new CPL so that stack accesses use it. */
3412 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3413 IEM_SET_CPL(pVCpu, uNewCpl);
3414
3415 /* Create the stack frame. */
3416 uint8_t bUnmapInfoStackFrame;
3417 RTPTRUNION uStackFrame;
3418 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3419 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3420 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3421 if (rcStrict != VINF_SUCCESS)
3422 return rcStrict;
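/* The frame is written from the lowest address upwards, so an error code (if
   any) ends up last-pushed at the top of the new stack, followed at increasing
   addresses by EIP/IP, CS, EFLAGS, the old ESP/SP, the old SS and, for V8086,
   the ES/DS/FS/GS selectors. */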
3423 if (f32BitGate)
3424 {
3425 if (fFlags & IEM_XCPT_FLAGS_ERR)
3426 *uStackFrame.pu32++ = uErr;
3427 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3428 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3429 uStackFrame.pu32[2] = fEfl;
3430 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3431 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3432 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3433 if (fEfl & X86_EFL_VM)
3434 {
3435 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3436 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3437 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3438 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3439 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3440 }
3441 }
3442 else
3443 {
3444 if (fFlags & IEM_XCPT_FLAGS_ERR)
3445 *uStackFrame.pu16++ = uErr;
3446 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3447 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3448 uStackFrame.pu16[2] = fEfl;
3449 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3450 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3451 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3452 if (fEfl & X86_EFL_VM)
3453 {
3454 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3455 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3456 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3457 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3458 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3459 }
3460 }
3461 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3462 if (rcStrict != VINF_SUCCESS)
3463 return rcStrict;
3464
3465 /* Mark the selectors 'accessed' (hope this is the correct time). */
3466 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3467 * after pushing the stack frame? (Write protect the gdt + stack to
3468 * find out.) */
3469 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3470 {
3471 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3472 if (rcStrict != VINF_SUCCESS)
3473 return rcStrict;
3474 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3475 }
3476
3477 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3478 {
3479 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3480 if (rcStrict != VINF_SUCCESS)
3481 return rcStrict;
3482 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3483 }
3484
3485 /*
3486 * Start committing the register changes (joins with the DPL=CPL branch).
3487 */
3488 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3489 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3490 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3491 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3492 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3493 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3494 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3495 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3496 * SP is loaded).
3497 * Need to check the other combinations too:
3498 * - 16-bit TSS, 32-bit handler
3499 * - 32-bit TSS, 16-bit handler */
3500 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3501 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3502 else
3503 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3504
3505 if (fEfl & X86_EFL_VM)
3506 {
3507 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3508 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3509 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3510 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3511 }
3512 }
3513 /*
3514 * Same privilege, no stack change and smaller stack frame.
3515 */
3516 else
3517 {
3518 uint64_t uNewRsp;
3519 uint8_t bUnmapInfoStackFrame;
3520 RTPTRUNION uStackFrame;
3521 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
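/* A same-CPL frame only holds (E)IP, CS and (E)FLAGS: 6 bytes via a 16-bit
   gate or 12 via a 32-bit one, plus 2/4 when an error code is pushed. */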
3522 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3523 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3524 if (rcStrict != VINF_SUCCESS)
3525 return rcStrict;
3526
3527 if (f32BitGate)
3528 {
3529 if (fFlags & IEM_XCPT_FLAGS_ERR)
3530 *uStackFrame.pu32++ = uErr;
3531 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3532 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3533 uStackFrame.pu32[2] = fEfl;
3534 }
3535 else
3536 {
3537 if (fFlags & IEM_XCPT_FLAGS_ERR)
3538 *uStackFrame.pu16++ = uErr;
3539 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3540 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3541 uStackFrame.pu16[2] = fEfl;
3542 }
3543 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3544 if (rcStrict != VINF_SUCCESS)
3545 return rcStrict;
3546
3547 /* Mark the CS selector as 'accessed'. */
3548 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3549 {
3550 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3551 if (rcStrict != VINF_SUCCESS)
3552 return rcStrict;
3553 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3554 }
3555
3556 /*
3557 * Start committing the register changes (joins with the other branch).
3558 */
3559 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3560 }
3561
3562 /* ... register committing continues. */
3563 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3564 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3565 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3566 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3567 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3568 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3569
3570 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3571 fEfl &= ~fEflToClear;
3572 IEMMISC_SET_EFL(pVCpu, fEfl);
3573
3574 if (fFlags & IEM_XCPT_FLAGS_CR2)
3575 pVCpu->cpum.GstCtx.cr2 = uCr2;
3576
3577 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3578 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3579
3580 /* Make sure the execution flags are correct. */
3581 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3582 if (fExecNew != pVCpu->iem.s.fExec)
3583 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3584 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3585 pVCpu->iem.s.fExec = fExecNew;
3586 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3587
3588 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3589}
3590
3591
3592/**
3593 * Implements exceptions and interrupts for long mode.
3594 *
3595 * @returns VBox strict status code.
3596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3597 * @param cbInstr The number of bytes to offset rIP by in the return
3598 * address.
3599 * @param u8Vector The interrupt / exception vector number.
3600 * @param fFlags The flags.
3601 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3602 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3603 */
3604static VBOXSTRICTRC
3605iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3606 uint8_t cbInstr,
3607 uint8_t u8Vector,
3608 uint32_t fFlags,
3609 uint16_t uErr,
3610 uint64_t uCr2) RT_NOEXCEPT
3611{
3612 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3613
3614 /*
3615 * Read the IDT entry.
3616 */
3617 uint16_t offIdt = (uint16_t)u8Vector << 4;
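/* Long-mode IDT gate descriptors are 16 bytes each, hence the shift by 4. */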
3618 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3619 {
3620 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3621 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3622 }
3623 X86DESC64 Idte;
3624#ifdef _MSC_VER /* Shut up silly compiler warning. */
3625 Idte.au64[0] = 0;
3626 Idte.au64[1] = 0;
3627#endif
3628 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3629 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3630 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3631 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3632 {
3633 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3634 return rcStrict;
3635 }
3636 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3637 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3638 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3639
3640 /*
3641 * Check the descriptor type, DPL and such.
3642 * ASSUMES this is done in the same order as described for call-gate calls.
3643 */
3644 if (Idte.Gate.u1DescType)
3645 {
3646 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3647 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3648 }
3649 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3650 switch (Idte.Gate.u4Type)
3651 {
3652 case AMD64_SEL_TYPE_SYS_INT_GATE:
3653 fEflToClear |= X86_EFL_IF;
3654 break;
3655 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3656 break;
3657
3658 default:
3659 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3660 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3661 }
3662
3663 /* Check DPL against CPL if applicable. */
3664 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3665 {
3666 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3667 {
3668 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3669 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3670 }
3671 }
3672
3673 /* Is it there? */
3674 if (!Idte.Gate.u1Present)
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3677 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3678 }
3679
3680 /* A null CS is bad. */
3681 RTSEL NewCS = Idte.Gate.u16Sel;
3682 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3683 {
3684 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3685 return iemRaiseGeneralProtectionFault0(pVCpu);
3686 }
3687
3688 /* Fetch the descriptor for the new CS. */
3689 IEMSELDESC DescCS;
3690 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3691 if (rcStrict != VINF_SUCCESS)
3692 {
3693 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3694 return rcStrict;
3695 }
3696
3697 /* Must be a 64-bit code segment. */
3698 if (!DescCS.Long.Gen.u1DescType)
3699 {
3700 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3701 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3702 }
3703 if ( !DescCS.Long.Gen.u1Long
3704 || DescCS.Long.Gen.u1DefBig
3705 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3706 {
3707 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3708 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3709 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3710 }
3711
3712 /* Don't allow lowering the privilege level. For non-conforming CS
3713 selectors, the CS.DPL sets the privilege level the trap/interrupt
3714 handler runs at. For conforming CS selectors, the CPL remains
3715 unchanged, but the CS.DPL must be <= CPL. */
3716 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3717 * when CPU in Ring-0. Result \#GP? */
3718 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3719 {
3720 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3721 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3722 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3723 }
3724
3725
3726 /* Make sure the selector is present. */
3727 if (!DescCS.Legacy.Gen.u1Present)
3728 {
3729 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3730 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3731 }
3732
3733 /* Check that the new RIP is canonical. */
3734 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3735 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3736 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3737 if (!IEM_IS_CANONICAL(uNewRip))
3738 {
3739 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3740 return iemRaiseGeneralProtectionFault0(pVCpu);
3741 }
3742
3743 /*
3744 * If the privilege level changes or if the IST isn't zero, we need to get
3745 * a new stack from the TSS.
3746 */
3747 uint64_t uNewRsp;
3748 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3749 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3750 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3751 || Idte.Gate.u3IST != 0)
3752 {
3753 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3754 if (rcStrict != VINF_SUCCESS)
3755 return rcStrict;
3756 }
3757 else
3758 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3759 uNewRsp &= ~(uint64_t)0xf;
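/* In 64-bit mode the CPU aligns the stack pointer down to a 16-byte boundary
   before pushing the interrupt frame, whether it came from the TSS/IST or is
   the current RSP. */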
3760
3761 /*
3762 * Calc the flag image to push.
3763 */
3764 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3765 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3766 fEfl &= ~X86_EFL_RF;
3767 else
3768 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3769
3770 /*
3771 * Start making changes.
3772 */
3773 /* Set the new CPL so that stack accesses use it. */
3774 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3775 IEM_SET_CPL(pVCpu, uNewCpl);
3776/** @todo Setting CPL this early seems wrong as it would affect any errors we
3777 * raise accessing the stack and (?) GDT/LDT... */
3778
3779 /* Create the stack frame. */
3780 uint8_t bUnmapInfoStackFrame;
3781 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
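/* The 64-bit frame always holds SS, RSP, RFLAGS, CS and RIP (5 qwords) plus an
   optional error code; unlike protected mode, SS:RSP is pushed here even when
   the privilege level does not change. */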
3782 RTPTRUNION uStackFrame;
3783 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3784 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787
3788 if (fFlags & IEM_XCPT_FLAGS_ERR)
3789 *uStackFrame.pu64++ = uErr;
3790 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3791 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3792 uStackFrame.pu64[2] = fEfl;
3793 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3794 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3795 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3796 if (rcStrict != VINF_SUCCESS)
3797 return rcStrict;
3798
3799 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3800 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3801 * after pushing the stack frame? (Write protect the gdt + stack to
3802 * find out.) */
3803 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3804 {
3805 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3806 if (rcStrict != VINF_SUCCESS)
3807 return rcStrict;
3808 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3809 }
3810
3811 /*
3812 * Start committing the register changes.
3813 */
3814 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3815 * hidden registers when interrupting 32-bit or 16-bit code! */
3816 if (uNewCpl != uOldCpl)
3817 {
3818 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3819 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3820 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3821 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3822 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3823 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3824 }
3825 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3826 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3827 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3828 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3829 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3830 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3831 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3832 pVCpu->cpum.GstCtx.rip = uNewRip;
3833
3834 fEfl &= ~fEflToClear;
3835 IEMMISC_SET_EFL(pVCpu, fEfl);
3836
3837 if (fFlags & IEM_XCPT_FLAGS_CR2)
3838 pVCpu->cpum.GstCtx.cr2 = uCr2;
3839
3840 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3841 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3842
3843 iemRecalcExecModeAndCplFlags(pVCpu);
3844
3845 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3846}
3847
3848
3849/**
3850 * Implements exceptions and interrupts.
3851 *
3852 * All exceptions and interrupts go thru this function!
3853 *
3854 * @returns VBox strict status code.
3855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3856 * @param cbInstr The number of bytes to offset rIP by in the return
3857 * address.
3858 * @param u8Vector The interrupt / exception vector number.
3859 * @param fFlags The flags.
3860 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3861 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3862 */
3863VBOXSTRICTRC
3864iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3865 uint8_t cbInstr,
3866 uint8_t u8Vector,
3867 uint32_t fFlags,
3868 uint16_t uErr,
3869 uint64_t uCr2) RT_NOEXCEPT
3870{
3871 /*
3872 * Get all the state that we might need here.
3873 */
3874 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3875 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3876
3877#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3878 /*
3879 * Flush prefetch buffer
3880 */
3881 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3882#endif
3883
3884 /*
3885 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3886 */
3887 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3888 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3889 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3890 | IEM_XCPT_FLAGS_BP_INSTR
3891 | IEM_XCPT_FLAGS_ICEBP_INSTR
3892 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3893 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3894 {
3895 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3896 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3897 u8Vector = X86_XCPT_GP;
3898 uErr = 0;
3899 }
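/* (With IOPL < 3 in V8086 mode a software INT n is denied with #GP(0) rather
   than dispatched through the IDT, which is what the conversion above does.) */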
3900
3901 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3902#ifdef DBGFTRACE_ENABLED
3903 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3904 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3905 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3906#endif
3907
3908 /*
3909 * Check if DBGF wants to intercept the exception.
3910 */
3911 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3912 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3913 { /* likely */ }
3914 else
3915 {
3916 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3917 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3918 if (rcStrict != VINF_SUCCESS)
3919 return rcStrict;
3920 }
3921
3922 /*
3923 * Evaluate whether NMI blocking should be in effect.
3924 * Normally, NMI blocking is in effect whenever we inject an NMI.
3925 */
3926 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3927 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3928
3929#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3930 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3931 {
3932 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3933 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3934 return rcStrict0;
3935
3936 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3937 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3938 {
3939 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3940 fBlockNmi = false;
3941 }
3942 }
3943#endif
3944
3945#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3946 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3947 {
3948 /*
3949 * If the event is being injected as part of VMRUN, it isn't subject to event
3950 * intercepts in the nested-guest. However, secondary exceptions that occur
3951 * during injection of any event -are- subject to exception intercepts.
3952 *
3953 * See AMD spec. 15.20 "Event Injection".
3954 */
3955 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3956 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3957 else
3958 {
3959 /*
3960 * Check and handle if the event being raised is intercepted.
3961 */
3962 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3963 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3964 return rcStrict0;
3965 }
3966 }
3967#endif
3968
3969 /*
3970 * Set NMI blocking if necessary.
3971 */
3972 if (fBlockNmi)
3973 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3974
3975 /*
3976 * Do recursion accounting.
3977 */
3978 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3979 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3980 if (pVCpu->iem.s.cXcptRecursions == 0)
3981 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3982 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3983 else
3984 {
3985 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3986 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3987 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3988
3989 if (pVCpu->iem.s.cXcptRecursions >= 4)
3990 {
3991#ifdef DEBUG_bird
3992 AssertFailed();
3993#endif
3994 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3995 }
3996
3997 /*
3998 * Evaluate the sequence of recurring events.
3999 */
4000 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4001 NULL /* pXcptRaiseInfo */);
4002 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4003 { /* likely */ }
4004 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4005 {
4006 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4007 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4008 u8Vector = X86_XCPT_DF;
4009 uErr = 0;
4010#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4011 /* VMX nested-guest #DF intercept needs to be checked here. */
4012 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4013 {
4014 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4015 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4016 return rcStrict0;
4017 }
4018#endif
4019 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4020 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4021 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4022 }
4023 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4024 {
4025 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4026 return iemInitiateCpuShutdown(pVCpu);
4027 }
4028 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4029 {
4030 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4031 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4032 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4033 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4034 return VERR_EM_GUEST_CPU_HANG;
4035 }
4036 else
4037 {
4038 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4039 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4040 return VERR_IEM_IPE_9;
4041 }
4042
4043 /*
4044 * The 'EXT' bit is set when an exception occurs during delivery of an external
4045 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4046 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4047 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set.
4048 *
4049 * [1] - Intel spec. 6.13 "Error Code"
4050 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4051 * [3] - Intel Instruction reference for INT n.
4052 */
4053 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4054 && (fFlags & IEM_XCPT_FLAGS_ERR)
4055 && u8Vector != X86_XCPT_PF
4056 && u8Vector != X86_XCPT_DF)
4057 {
4058 uErr |= X86_TRAP_ERR_EXTERNAL;
4059 }
4060 }
4061
4062 pVCpu->iem.s.cXcptRecursions++;
4063 pVCpu->iem.s.uCurXcpt = u8Vector;
4064 pVCpu->iem.s.fCurXcpt = fFlags;
4065 pVCpu->iem.s.uCurXcptErr = uErr;
4066 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4067
4068 /*
4069 * Extensive logging.
4070 */
4071#if defined(LOG_ENABLED) && defined(IN_RING3)
4072 if (LogIs3Enabled())
4073 {
4074 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4075 char szRegs[4096];
4076 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4077 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4078 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4079 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4080 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4081 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4082 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4083 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4084 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4085 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4086 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4087 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4088 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4089 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4090 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4091 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4092 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4093 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4094 " efer=%016VR{efer}\n"
4095 " pat=%016VR{pat}\n"
4096 " sf_mask=%016VR{sf_mask}\n"
4097 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4098 " lstar=%016VR{lstar}\n"
4099 " star=%016VR{star} cstar=%016VR{cstar}\n"
4100 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4101 );
4102
4103 char szInstr[256];
4104 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4105 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4106 szInstr, sizeof(szInstr), NULL);
4107 Log3(("%s%s\n", szRegs, szInstr));
4108 }
4109#endif /* LOG_ENABLED */
4110
4111 /*
4112 * Stats.
4113 */
4114 uint64_t const uTimestamp = ASMReadTSC();
4115 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4116 {
4117 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4118 EMHistoryAddExit(pVCpu,
4119 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4120 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4121 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4122 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4123 }
4124 else
4125 {
4126 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4127 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4128 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4129 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4130 if (fFlags & IEM_XCPT_FLAGS_ERR)
4131 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4132 if (fFlags & IEM_XCPT_FLAGS_CR2)
4133 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4134 }
4135
4136 /*
4137 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4138 * to ensure that a stale TLB or paging cache entry will only cause one
4139 * spurious #PF.
4140 */
4141 if ( u8Vector == X86_XCPT_PF
4142 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4143 IEMTlbInvalidatePage(pVCpu, uCr2);
4144
4145 /*
4146 * Call the mode specific worker function.
4147 */
4148 VBOXSTRICTRC rcStrict;
4149 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4150 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4151 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4152 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4153 else
4154 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4155
4156 /* Flush the prefetch buffer. */
4157 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4158
4159 /*
4160 * Unwind.
4161 */
4162 pVCpu->iem.s.cXcptRecursions--;
4163 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4164 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4165 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4166 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4167 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4168 return rcStrict;
4169}
4170
4171#ifdef IEM_WITH_SETJMP
4172/**
4173 * See iemRaiseXcptOrInt. Will not return.
4174 */
4175DECL_NO_RETURN(void)
4176iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4177 uint8_t cbInstr,
4178 uint8_t u8Vector,
4179 uint32_t fFlags,
4180 uint16_t uErr,
4181 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4182{
4183 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4184 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4185}
4186#endif
4187
4188
4189/** \#DE - 00. */
4190VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4191{
4192 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4193}
4194
4195
4196/** \#DB - 01.
4197 * @note This automatically clears DR7.GD. */
4198VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4199{
4200 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4201 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4202 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4203}
4204
4205
4206/** \#BR - 05. */
4207VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4208{
4209 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4210}
4211
4212
4213/** \#UD - 06. */
4214VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4215{
4216 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4217}
4218
4219
4220#ifdef IEM_WITH_SETJMP
4221/** \#UD - 06. */
4222DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4223{
4224 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4225}
4226#endif
4227
4228
4229/** \#NM - 07. */
4230VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4231{
4232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4233}
4234
4235
4236#ifdef IEM_WITH_SETJMP
4237/** \#NM - 07. */
4238DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4239{
4240 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4241}
4242#endif
4243
4244
4245/** \#TS(err) - 0a. */
4246VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4247{
4248 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4249}
4250
4251
4252/** \#TS(tr) - 0a. */
4253VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4254{
4255 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4256 pVCpu->cpum.GstCtx.tr.Sel, 0);
4257}
4258
4259
4260/** \#TS(0) - 0a. */
4261VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4262{
4263 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4264 0, 0);
4265}
4266
4267
4268/** \#TS(err) - 0a. */
4269VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4270{
4271 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4272 uSel & X86_SEL_MASK_OFF_RPL, 0);
4273}
4274
4275
4276/** \#NP(err) - 0b. */
4277VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4278{
4279 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4280}
4281
4282
4283/** \#NP(sel) - 0b. */
4284VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4285{
4286 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4287 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4288 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4289 uSel & ~X86_SEL_RPL, 0);
4290}
4291
4292
4293/** \#SS(seg) - 0c. */
4294VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4295{
4296 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4297 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4298 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4299 uSel & ~X86_SEL_RPL, 0);
4300}
4301
4302
4303/** \#SS(err) - 0c. */
4304VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4305{
4306 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4307 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4308 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4309}
4310
4311
4312/** \#GP(n) - 0d. */
4313VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4314{
4315 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4316 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4317}
4318
4319
4320/** \#GP(0) - 0d. */
4321VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4322{
4323 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4324 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4325}
4326
4327#ifdef IEM_WITH_SETJMP
4328/** \#GP(0) - 0d. */
4329DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4330{
4331 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4332 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4333}
4334#endif
4335
4336
4337/** \#GP(sel) - 0d. */
4338VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4339{
4340 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4341 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4342 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4343 Sel & ~X86_SEL_RPL, 0);
4344}
4345
4346
4347/** \#GP(0) - 0d. */
4348VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4349{
4350 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4351 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4352}
4353
4354
4355/** \#GP(sel) - 0d. */
4356VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4357{
4358 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4359 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4360 NOREF(iSegReg); NOREF(fAccess);
4361 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4362 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4363}
4364
4365#ifdef IEM_WITH_SETJMP
4366/** \#GP(sel) - 0d, longjmp. */
4367DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4368{
4369 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4370 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4371 NOREF(iSegReg); NOREF(fAccess);
4372 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4373 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4374}
4375#endif
4376
4377/** \#GP(sel) - 0d. */
4378VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4379{
4380 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4381 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4382 NOREF(Sel);
4383 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4384}
4385
4386#ifdef IEM_WITH_SETJMP
4387/** \#GP(sel) - 0d, longjmp. */
4388DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4389{
4390 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4391 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4392 NOREF(Sel);
4393 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4394}
4395#endif
4396
4397
4398/** \#GP(sel) - 0d. */
4399VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4400{
4401 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4402 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4403 NOREF(iSegReg); NOREF(fAccess);
4404 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4405}
4406
4407#ifdef IEM_WITH_SETJMP
4408/** \#GP(sel) - 0d, longjmp. */
4409DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4410{
4411 NOREF(iSegReg); NOREF(fAccess);
4412 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4413}
4414#endif
4415
4416
4417/** \#PF(n) - 0e. */
4418VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4419{
4420 uint16_t uErr;
4421 switch (rc)
4422 {
4423 case VERR_PAGE_NOT_PRESENT:
4424 case VERR_PAGE_TABLE_NOT_PRESENT:
4425 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4426 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4427 uErr = 0;
4428 break;
4429
4430 default:
4431 AssertMsgFailed(("%Rrc\n", rc));
4432 RT_FALL_THRU();
4433 case VERR_ACCESS_DENIED:
4434 uErr = X86_TRAP_PF_P;
4435 break;
4436
4437 /** @todo reserved */
4438 }
4439
4440 if (IEM_GET_CPL(pVCpu) == 3)
4441 uErr |= X86_TRAP_PF_US;
4442
4443 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4444 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4445 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4446 uErr |= X86_TRAP_PF_ID;
4447
4448#if 0 /* This is so much non-sense, really. Why was it done like that? */
4449 /* Note! RW access callers reporting a WRITE protection fault, will clear
4450 the READ flag before calling. So, read-modify-write accesses (RW)
4451 can safely be reported as READ faults. */
4452 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4453 uErr |= X86_TRAP_PF_RW;
4454#else
4455 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4456 {
4457 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4458 /// (regardless of outcome of the comparison in the latter case).
4459 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4460 uErr |= X86_TRAP_PF_RW;
4461 }
4462#endif
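    /* For reference, the architectural #PF error code bits assembled above: bit 0 (P) is set for
       protection violations and clear for not-present pages, bit 1 (W/R) for writes, bit 2 (U/S)
       for CPL=3 accesses, and bit 4 (I/D) for instruction fetches when NX applies (PAE + EFER.NXE). */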
4463
4464 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4465 of the memory operand rather than at the start of it. (Not sure what
4466       happens if it crosses a page boundary.) The current heuristics for
4467 this is to report the #PF for the last byte if the access is more than
4468 64 bytes. This is probably not correct, but we can work that out later,
4469 main objective now is to get FXSAVE to work like for real hardware and
4470 make bs3-cpu-basic2 work. */
4471 if (cbAccess <= 64)
4472     { /* likely */ }
4473 else
4474 GCPtrWhere += cbAccess - 1;
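    /* Illustration of the heuristic: a 512-byte FXSAVE area starting at X thus reports CR2 = X + 511. */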
4475
4476 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4477 uErr, GCPtrWhere);
4478}
4479
4480#ifdef IEM_WITH_SETJMP
4481/** \#PF(n) - 0e, longjmp. */
4482DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4483 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4484{
4485 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4486}
4487#endif
4488
4489
4490/** \#MF(0) - 10. */
4491VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4492{
4493 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4494 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4495
4496 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4497 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4498 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4499}
4500
4501
4502/** \#AC(0) - 11. */
4503VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4504{
4505 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4506}
4507
4508#ifdef IEM_WITH_SETJMP
4509/** \#AC(0) - 11, longjmp. */
4510DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4511{
4512 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4513}
4514#endif
4515
4516
4517/** \#XF(0)/\#XM(0) - 19. */
4518VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4519{
4520 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4521}
4522
4523
4524/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4525IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4526{
4527 NOREF(cbInstr);
4528 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4529}
4530
4531
4532/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4533IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4534{
4535 NOREF(cbInstr);
4536 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4537}
4538
4539
4540/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4541IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4542{
4543 NOREF(cbInstr);
4544 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4545}
4546
4547
4548/** @} */
4549
4550/** @name Common opcode decoders.
4551 * @{
4552 */
4553//#include <iprt/mem.h>
4554
4555/**
4556 * Used to add extra details about a stub case.
4557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4558 */
4559void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4560{
4561#if defined(LOG_ENABLED) && defined(IN_RING3)
4562 PVM pVM = pVCpu->CTX_SUFF(pVM);
4563 char szRegs[4096];
4564 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4565 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4566 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4567 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4568 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4569 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4570 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4571 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4572 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4573 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4574 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4575 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4576 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4577 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4578 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4579 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4580 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4581 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4582 " efer=%016VR{efer}\n"
4583 " pat=%016VR{pat}\n"
4584 " sf_mask=%016VR{sf_mask}\n"
4585 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4586 " lstar=%016VR{lstar}\n"
4587 " star=%016VR{star} cstar=%016VR{cstar}\n"
4588 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4589 );
4590
4591 char szInstr[256];
4592 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4593 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4594 szInstr, sizeof(szInstr), NULL);
4595
4596 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4597#else
4598     RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4599#endif
4600}
4601
4602/** @} */
4603
4604
4605
4606/** @name Register Access.
4607 * @{
4608 */
4609
4610/**
4611  * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4612 *
4613 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4614 * segment limit.
4615 *
4616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4617 * @param cbInstr Instruction size.
4618 * @param offNextInstr The offset of the next instruction.
4619 * @param enmEffOpSize Effective operand size.
4620 */
4621VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4622 IEMMODE enmEffOpSize) RT_NOEXCEPT
4623{
4624 switch (enmEffOpSize)
4625 {
4626 case IEMMODE_16BIT:
4627 {
4628 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
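            /* Note: this uint16_t arithmetic wraps modulo 64K (16-bit operand size truncation); e.g.
               IP=0xFFFE plus a 2-byte JMP with offset 0 yields 0x0000, which is then checked against
               the CS limit below. */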
4629 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4630 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4631 pVCpu->cpum.GstCtx.rip = uNewIp;
4632 else
4633 return iemRaiseGeneralProtectionFault0(pVCpu);
4634 break;
4635 }
4636
4637 case IEMMODE_32BIT:
4638 {
4639 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4640 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4641
4642 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4643 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4644 pVCpu->cpum.GstCtx.rip = uNewEip;
4645 else
4646 return iemRaiseGeneralProtectionFault0(pVCpu);
4647 break;
4648 }
4649
4650 case IEMMODE_64BIT:
4651 {
4652 Assert(IEM_IS_64BIT_CODE(pVCpu));
4653
4654 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4655 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4656 pVCpu->cpum.GstCtx.rip = uNewRip;
4657 else
4658 return iemRaiseGeneralProtectionFault0(pVCpu);
4659 break;
4660 }
4661
4662 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4663 }
4664
4665#ifndef IEM_WITH_CODE_TLB
4666 /* Flush the prefetch buffer. */
4667 pVCpu->iem.s.cbOpcode = cbInstr;
4668#endif
4669
4670 /*
4671 * Clear RF and finish the instruction (maybe raise #DB).
4672 */
4673 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4674}
4675
4676
4677/**
4678 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4679 *
4680 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4681 * segment limit.
4682 *
4683 * @returns Strict VBox status code.
4684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4685 * @param cbInstr Instruction size.
4686 * @param offNextInstr The offset of the next instruction.
4687 */
4688VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4689{
4690 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4691
4692 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4693 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4694 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4695 pVCpu->cpum.GstCtx.rip = uNewIp;
4696 else
4697 return iemRaiseGeneralProtectionFault0(pVCpu);
4698
4699#ifndef IEM_WITH_CODE_TLB
4700 /* Flush the prefetch buffer. */
4701 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4702#endif
4703
4704 /*
4705 * Clear RF and finish the instruction (maybe raise #DB).
4706 */
4707 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4708}
4709
4710
4711/**
4712 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4713 *
4714 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4715 * segment limit.
4716 *
4717 * @returns Strict VBox status code.
4718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4719 * @param cbInstr Instruction size.
4720 * @param offNextInstr The offset of the next instruction.
4721 * @param enmEffOpSize Effective operand size.
4722 */
4723VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4724 IEMMODE enmEffOpSize) RT_NOEXCEPT
4725{
4726 if (enmEffOpSize == IEMMODE_32BIT)
4727 {
4728 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4729
4730 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4731 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4732 pVCpu->cpum.GstCtx.rip = uNewEip;
4733 else
4734 return iemRaiseGeneralProtectionFault0(pVCpu);
4735 }
4736 else
4737 {
4738 Assert(enmEffOpSize == IEMMODE_64BIT);
4739
4740 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4741 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4742 pVCpu->cpum.GstCtx.rip = uNewRip;
4743 else
4744 return iemRaiseGeneralProtectionFault0(pVCpu);
4745 }
4746
4747#ifndef IEM_WITH_CODE_TLB
4748 /* Flush the prefetch buffer. */
4749 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4750#endif
4751
4752 /*
4753 * Clear RF and finish the instruction (maybe raise #DB).
4754 */
4755 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4756}
4757
4758/** @} */
4759
4760
4761/** @name FPU access and helpers.
4762 *
4763 * @{
4764 */
4765
4766/**
4767 * Updates the x87.DS and FPUDP registers.
4768 *
4769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4770 * @param pFpuCtx The FPU context.
4771 * @param iEffSeg The effective segment register.
4772 * @param GCPtrEff The effective address relative to @a iEffSeg.
4773 */
4774DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4775{
4776 RTSEL sel;
4777 switch (iEffSeg)
4778 {
4779 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4780 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4781 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4782 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4783 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4784 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4785 default:
4786 AssertMsgFailed(("%d\n", iEffSeg));
4787 sel = pVCpu->cpum.GstCtx.ds.Sel;
4788 }
4789     /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4790 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4791 {
4792 pFpuCtx->DS = 0;
4793 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
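        /* E.g. sel=0x1234 with GCPtrEff=0x0010 yields FPUDP=0x12350 (real-mode style linear address). */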
4794 }
4795 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4796 {
4797 pFpuCtx->DS = sel;
4798 pFpuCtx->FPUDP = GCPtrEff;
4799 }
4800 else
4801 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4802}
4803
4804
4805/**
4806 * Rotates the stack registers in the push direction.
4807 *
4808 * @param pFpuCtx The FPU context.
4809 * @remarks This is a complete waste of time, but fxsave stores the registers in
4810 * stack order.
4811 */
4812DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4813{
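    /* aRegs[] is kept in ST() order (aRegs[i] == ST(i)), mirroring the FXSAVE register image, so a
       change of TOP means physically rotating the array contents. */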
4814 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4815 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4816 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4817 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4818 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4819 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4820 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4821 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4822 pFpuCtx->aRegs[0].r80 = r80Tmp;
4823}
4824
4825
4826/**
4827 * Rotates the stack registers in the pop direction.
4828 *
4829 * @param pFpuCtx The FPU context.
4830 * @remarks This is a complete waste of time, but fxsave stores the registers in
4831 * stack order.
4832 */
4833DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4834{
4835 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4836 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4837 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4838 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4839 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4840 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4841 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4842 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4843 pFpuCtx->aRegs[7].r80 = r80Tmp;
4844}
4845
4846
4847/**
4848 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4849 * exception prevents it.
4850 *
4851 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4852 * @param pResult The FPU operation result to push.
4853 * @param pFpuCtx The FPU context.
4854 */
4855static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4856{
4857 /* Update FSW and bail if there are pending exceptions afterwards. */
4858 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4859 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4860 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4861 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4862 {
4863         if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4864 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4865 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4866 pFpuCtx->FSW = fFsw;
4867 return;
4868 }
4869
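    /* A push decrements TOP: adding 7 modulo 8 equals subtracting 1, e.g. TOP=0 wraps to 7. */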
4870 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
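    /* FTW is used as a one-bit-per-register occupancy mask (the abridged FXSAVE tag format), so a
       clear bit at the new TOP means that slot is free. */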
4871 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4872 {
4873 /* All is fine, push the actual value. */
4874 pFpuCtx->FTW |= RT_BIT(iNewTop);
4875 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4876 }
4877 else if (pFpuCtx->FCW & X86_FCW_IM)
4878 {
4879 /* Masked stack overflow, push QNaN. */
4880 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4881 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4882 }
4883 else
4884 {
4885 /* Raise stack overflow, don't push anything. */
4886 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4887 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4888 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4889 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4890 return;
4891 }
4892
4893 fFsw &= ~X86_FSW_TOP_MASK;
4894 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4895 pFpuCtx->FSW = fFsw;
4896
4897 iemFpuRotateStackPush(pFpuCtx);
4898 RT_NOREF(pVCpu);
4899}
4900
4901
4902/**
4903 * Stores a result in a FPU register and updates the FSW and FTW.
4904 *
4905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4906 * @param pFpuCtx The FPU context.
4907 * @param pResult The result to store.
4908 * @param iStReg Which FPU register to store it in.
4909 */
4910static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4911{
4912 Assert(iStReg < 8);
4913 uint16_t fNewFsw = pFpuCtx->FSW;
4914 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4915 fNewFsw &= ~X86_FSW_C_MASK;
4916 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4917 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4918 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4919 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4920 pFpuCtx->FSW = fNewFsw;
4921 pFpuCtx->FTW |= RT_BIT(iReg);
4922 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4923 RT_NOREF(pVCpu);
4924}
4925
4926
4927/**
4928 * Only updates the FPU status word (FSW) with the result of the current
4929 * instruction.
4930 *
4931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4932 * @param pFpuCtx The FPU context.
4933 * @param u16FSW The FSW output of the current instruction.
4934 */
4935static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4936{
4937 uint16_t fNewFsw = pFpuCtx->FSW;
4938 fNewFsw &= ~X86_FSW_C_MASK;
4939 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4940 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4941         Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4942 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4943 pFpuCtx->FSW = fNewFsw;
4944 RT_NOREF(pVCpu);
4945}
4946
4947
4948/**
4949 * Pops one item off the FPU stack if no pending exception prevents it.
4950 *
4951 * @param pFpuCtx The FPU context.
4952 */
4953static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4954{
4955 /* Check pending exceptions. */
4956 uint16_t uFSW = pFpuCtx->FSW;
4957 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4958 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4959 return;
4960
4961     /* TOP++ (popping removes one item from the stack). */
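    /* Adding 9 in the 3-bit TOP field equals adding 1 modulo 8, so this is TOP + 1 with wraparound
       (e.g. TOP=7 becomes 0). */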
4962 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4963 uFSW &= ~X86_FSW_TOP_MASK;
4964 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4965 pFpuCtx->FSW = uFSW;
4966
4967 /* Mark the previous ST0 as empty. */
4968 iOldTop >>= X86_FSW_TOP_SHIFT;
4969 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4970
4971 /* Rotate the registers. */
4972 iemFpuRotateStackPop(pFpuCtx);
4973}
4974
4975
4976/**
4977 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4978 *
4979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4980 * @param pResult The FPU operation result to push.
4981 * @param uFpuOpcode The FPU opcode value.
4982 */
4983void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4984{
4985 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4986 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4987 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4988}
4989
4990
4991/**
4992 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4993 * and sets FPUDP and FPUDS.
4994 *
4995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4996 * @param pResult The FPU operation result to push.
4997 * @param iEffSeg The effective segment register.
4998 * @param GCPtrEff The effective address relative to @a iEffSeg.
4999 * @param uFpuOpcode The FPU opcode value.
5000 */
5001void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5002 uint16_t uFpuOpcode) RT_NOEXCEPT
5003{
5004 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5005 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5006 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5007 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5008}
5009
5010
5011/**
5012  * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5013 * unless a pending exception prevents it.
5014 *
5015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5016 * @param pResult The FPU operation result to store and push.
5017 * @param uFpuOpcode The FPU opcode value.
5018 */
5019void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5020{
5021 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5022 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5023
5024 /* Update FSW and bail if there are pending exceptions afterwards. */
5025 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5026 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5027 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5028 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5029 {
5030 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5031 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5032 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5033 pFpuCtx->FSW = fFsw;
5034 return;
5035 }
5036
5037 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5038 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5039 {
5040 /* All is fine, push the actual value. */
5041 pFpuCtx->FTW |= RT_BIT(iNewTop);
5042 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5043 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5044 }
5045 else if (pFpuCtx->FCW & X86_FCW_IM)
5046 {
5047 /* Masked stack overflow, push QNaN. */
5048 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5049 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5050 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5051 }
5052 else
5053 {
5054 /* Raise stack overflow, don't push anything. */
5055 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5056 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5057 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5058 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5059 return;
5060 }
5061
5062 fFsw &= ~X86_FSW_TOP_MASK;
5063 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5064 pFpuCtx->FSW = fFsw;
5065
5066 iemFpuRotateStackPush(pFpuCtx);
5067}
5068
5069
5070/**
5071 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5072 * FOP.
5073 *
5074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5075 * @param pResult The result to store.
5076 * @param iStReg Which FPU register to store it in.
5077 * @param uFpuOpcode The FPU opcode value.
5078 */
5079void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5080{
5081 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5082 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5083 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5084}
5085
5086
5087/**
5088 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5089 * FOP, and then pops the stack.
5090 *
5091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5092 * @param pResult The result to store.
5093 * @param iStReg Which FPU register to store it in.
5094 * @param uFpuOpcode The FPU opcode value.
5095 */
5096void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5097{
5098 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5099 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5100 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5101 iemFpuMaybePopOne(pFpuCtx);
5102}
5103
5104
5105/**
5106 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5107 * FPUDP, and FPUDS.
5108 *
5109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5110 * @param pResult The result to store.
5111 * @param iStReg Which FPU register to store it in.
5112 * @param iEffSeg The effective memory operand selector register.
5113 * @param GCPtrEff The effective memory operand offset.
5114 * @param uFpuOpcode The FPU opcode value.
5115 */
5116void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5117 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5118{
5119 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5120 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5121 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5122 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5123}
5124
5125
5126/**
5127 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5128 * FPUDP, and FPUDS, and then pops the stack.
5129 *
5130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5131 * @param pResult The result to store.
5132 * @param iStReg Which FPU register to store it in.
5133 * @param iEffSeg The effective memory operand selector register.
5134 * @param GCPtrEff The effective memory operand offset.
5135 * @param uFpuOpcode The FPU opcode value.
5136 */
5137void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5138 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5139{
5140 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5141 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5142 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5143 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5144 iemFpuMaybePopOne(pFpuCtx);
5145}
5146
5147
5148/**
5149 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5150 *
5151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5152 * @param uFpuOpcode The FPU opcode value.
5153 */
5154void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5155{
5156 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5157 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5158}
5159
5160
5161/**
5162 * Updates the FSW, FOP, FPUIP, and FPUCS.
5163 *
5164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5165 * @param u16FSW The FSW from the current instruction.
5166 * @param uFpuOpcode The FPU opcode value.
5167 */
5168void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5169{
5170 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5171 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5172 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5173}
5174
5175
5176/**
5177 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5178 *
5179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5180 * @param u16FSW The FSW from the current instruction.
5181 * @param uFpuOpcode The FPU opcode value.
5182 */
5183void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5184{
5185 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5186 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5187 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5188 iemFpuMaybePopOne(pFpuCtx);
5189}
5190
5191
5192/**
5193 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5194 *
5195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5196 * @param u16FSW The FSW from the current instruction.
5197 * @param iEffSeg The effective memory operand selector register.
5198 * @param GCPtrEff The effective memory operand offset.
5199 * @param uFpuOpcode The FPU opcode value.
5200 */
5201void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5202{
5203 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5204 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5205 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5206 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5207}
5208
5209
5210/**
5211 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5212 *
5213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5214 * @param u16FSW The FSW from the current instruction.
5215 * @param uFpuOpcode The FPU opcode value.
5216 */
5217void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5218{
5219 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5220 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5221 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5222 iemFpuMaybePopOne(pFpuCtx);
5223 iemFpuMaybePopOne(pFpuCtx);
5224}
5225
5226
5227/**
5228 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5229 *
5230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5231 * @param u16FSW The FSW from the current instruction.
5232 * @param iEffSeg The effective memory operand selector register.
5233 * @param GCPtrEff The effective memory operand offset.
5234 * @param uFpuOpcode The FPU opcode value.
5235 */
5236void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5237 uint16_t uFpuOpcode) RT_NOEXCEPT
5238{
5239 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5240 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5241 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5242 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5243 iemFpuMaybePopOne(pFpuCtx);
5244}
5245
5246
5247/**
5248 * Worker routine for raising an FPU stack underflow exception.
5249 *
5250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5251 * @param pFpuCtx The FPU context.
5252 * @param iStReg The stack register being accessed.
5253 */
5254static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5255{
5256 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5257 if (pFpuCtx->FCW & X86_FCW_IM)
5258 {
5259 /* Masked underflow. */
5260 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5261 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5262 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5263 if (iStReg != UINT8_MAX)
5264 {
5265 pFpuCtx->FTW |= RT_BIT(iReg);
5266 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5267 }
5268 }
5269 else
5270 {
5271 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5272 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5273 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5274 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5275 }
5276 RT_NOREF(pVCpu);
5277}
5278
5279
5280/**
5281 * Raises a FPU stack underflow exception.
5282 *
5283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5284 * @param iStReg The destination register that should be loaded
5285 * with QNaN if \#IS is not masked. Specify
5286 * UINT8_MAX if none (like for fcom).
5287 * @param uFpuOpcode The FPU opcode value.
5288 */
5289void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5290{
5291 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5292 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5293 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5294}
5295
5296
5297void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5298{
5299 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5300 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5301 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5302 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5303}
5304
5305
5306void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5307{
5308 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5309 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5310 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5311 iemFpuMaybePopOne(pFpuCtx);
5312}
5313
5314
5315void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5316 uint16_t uFpuOpcode) RT_NOEXCEPT
5317{
5318 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5319 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5320 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5321 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5322 iemFpuMaybePopOne(pFpuCtx);
5323}
5324
5325
5326void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5327{
5328 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5329 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5330 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5331 iemFpuMaybePopOne(pFpuCtx);
5332 iemFpuMaybePopOne(pFpuCtx);
5333}
5334
5335
5336void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5337{
5338 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5339 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5340
5341 if (pFpuCtx->FCW & X86_FCW_IM)
5342 {
5343         /* Masked underflow - Push QNaN. */
5344 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5345 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5346 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5347 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5348 pFpuCtx->FTW |= RT_BIT(iNewTop);
5349 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5350 iemFpuRotateStackPush(pFpuCtx);
5351 }
5352 else
5353 {
5354 /* Exception pending - don't change TOP or the register stack. */
5355 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5356 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5357 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5358 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5359 }
5360}
5361
5362
5363void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5364{
5365 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5366 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5367
5368 if (pFpuCtx->FCW & X86_FCW_IM)
5369 {
5370         /* Masked underflow - Push QNaN. */
5371 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5372 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5373 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5374 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5375 pFpuCtx->FTW |= RT_BIT(iNewTop);
5376 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5377 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5378 iemFpuRotateStackPush(pFpuCtx);
5379 }
5380 else
5381 {
5382 /* Exception pending - don't change TOP or the register stack. */
5383 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5384 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5385 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5386 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5387 }
5388}
5389
5390
5391/**
5392 * Worker routine for raising an FPU stack overflow exception on a push.
5393 *
5394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5395 * @param pFpuCtx The FPU context.
5396 */
5397static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5398{
5399 if (pFpuCtx->FCW & X86_FCW_IM)
5400 {
5401 /* Masked overflow. */
5402 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5403 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5404 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5405 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5406 pFpuCtx->FTW |= RT_BIT(iNewTop);
5407 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5408 iemFpuRotateStackPush(pFpuCtx);
5409 }
5410 else
5411 {
5412 /* Exception pending - don't change TOP or the register stack. */
5413 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5414 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5415 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5416 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5417 }
5418 RT_NOREF(pVCpu);
5419}
5420
5421
5422/**
5423 * Raises a FPU stack overflow exception on a push.
5424 *
5425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5426 * @param uFpuOpcode The FPU opcode value.
5427 */
5428void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5429{
5430 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5431 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5432 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5433}
5434
5435
5436/**
5437 * Raises a FPU stack overflow exception on a push with a memory operand.
5438 *
5439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5440 * @param iEffSeg The effective memory operand selector register.
5441 * @param GCPtrEff The effective memory operand offset.
5442 * @param uFpuOpcode The FPU opcode value.
5443 */
5444void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5445{
5446 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5447 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5448 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5449 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5450}
5451
5452/** @} */
5453
5454
5455/** @name SSE+AVX SIMD access and helpers.
5456 *
5457 * @{
5458 */
5459/**
5460 * Stores a result in a SIMD XMM register, updates the MXCSR.
5461 *
5462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5463 * @param pResult The result to store.
5464 * @param iXmmReg Which SIMD XMM register to store the result in.
5465 */
5466void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5467{
5468 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5469 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5470
5471 /* The result is only updated if there is no unmasked exception pending. */
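    /* Shifting the mask bits (MXCSR[12:7]) right by X86_MXCSR_XCPT_MASK_SHIFT lines them up with the
       flag bits (MXCSR[5:0]); a set flag whose corresponding mask bit is clear blocks the update. */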
5472 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5473 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5474 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5475}
5476
5477
5478/**
5479 * Updates the MXCSR.
5480 *
5481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5482 * @param fMxcsr The new MXCSR value.
5483 */
5484void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5485{
5486 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5487 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5488}
5489/** @} */
5490
5491
5492/** @name Memory access.
5493 *
5494 * @{
5495 */
5496
5497#undef LOG_GROUP
5498#define LOG_GROUP LOG_GROUP_IEM_MEM
5499
5500/**
5501 * Updates the IEMCPU::cbWritten counter if applicable.
5502 *
5503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5504 * @param fAccess The access being accounted for.
5505 * @param cbMem The access size.
5506 */
5507DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5508{
5509 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5510 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5511 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5512}
5513
5514
5515/**
5516 * Applies the segment limit, base and attributes.
5517 *
5518 * This may raise a \#GP or \#SS.
5519 *
5520 * @returns VBox strict status code.
5521 *
5522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5523 * @param fAccess The kind of access which is being performed.
5524 * @param iSegReg The index of the segment register to apply.
5525 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5526 * TSS, ++).
5527 * @param cbMem The access size.
5528 * @param pGCPtrMem Pointer to the guest memory address to apply
5529 * segmentation to. Input and output parameter.
5530 */
5531VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5532{
5533 if (iSegReg == UINT8_MAX)
5534 return VINF_SUCCESS;
5535
5536 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5537 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5538 switch (IEM_GET_CPU_MODE(pVCpu))
5539 {
5540 case IEMMODE_16BIT:
5541 case IEMMODE_32BIT:
5542 {
5543 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5544 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5545
5546 if ( pSel->Attr.n.u1Present
5547 && !pSel->Attr.n.u1Unusable)
5548 {
5549 Assert(pSel->Attr.n.u1DescType);
5550 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5551 {
5552 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5553 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5554 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5555
5556 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5557 {
5558 /** @todo CPL check. */
5559 }
5560
5561 /*
5562 * There are two kinds of data selectors, normal and expand down.
5563 */
5564 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5565 {
5566 if ( GCPtrFirst32 > pSel->u32Limit
5567 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5568 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5569 }
5570 else
5571 {
5572 /*
5573 * The upper boundary is defined by the B bit, not the G bit!
5574 */
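                        /* E.g. an expand-down segment with limit=0x0FFF and B=1 accepts offsets
                           0x1000..0xFFFFFFFF; anything at or below the limit faults. */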
5575 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5576 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5577 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5578 }
5579 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5580 }
5581 else
5582 {
5583 /*
5584                  * Code selectors can usually be used to read through; writing is
5585                  * only permitted in real and V8086 mode.
5586 */
5587 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5588 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5589 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5590 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5591 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5592
5593 if ( GCPtrFirst32 > pSel->u32Limit
5594 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5595 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5596
5597 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5598 {
5599 /** @todo CPL check. */
5600 }
5601
5602 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5603 }
5604 }
5605 else
5606 return iemRaiseGeneralProtectionFault0(pVCpu);
5607 return VINF_SUCCESS;
5608 }
5609
5610 case IEMMODE_64BIT:
5611 {
5612 RTGCPTR GCPtrMem = *pGCPtrMem;
5613 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5614 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5615
5616 Assert(cbMem >= 1);
5617 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5618 return VINF_SUCCESS;
5619 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5620 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5621 return iemRaiseGeneralProtectionFault0(pVCpu);
5622 }
5623
5624 default:
5625 AssertFailedReturn(VERR_IEM_IPE_7);
5626 }
5627}
5628
5629
5630/**
5631  * Translates a virtual address to a physical address and checks if we
5632 * can access the page as specified.
5633 *
5634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5635 * @param GCPtrMem The virtual address.
5636 * @param cbAccess The access size, for raising \#PF correctly for
5637 * FXSAVE and such.
5638 * @param fAccess The intended access.
5639 * @param pGCPhysMem Where to return the physical address.
5640 */
5641VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5642 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5643{
5644 /** @todo Need a different PGM interface here. We're currently using
5645      * generic / REM interfaces. This won't cut it for R0. */
5646 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5647 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5648 * here. */
5649 PGMPTWALK Walk;
5650 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5651 if (RT_FAILURE(rc))
5652 {
5653 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5654 /** @todo Check unassigned memory in unpaged mode. */
5655 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5656#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5657 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5658 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5659#endif
5660 *pGCPhysMem = NIL_RTGCPHYS;
5661 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5662 }
5663
5664 /* If the page is writable and does not have the no-exec bit set, all
5665 access is allowed. Otherwise we'll have to check more carefully... */
5666 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5667 {
5668 /* Write to read only memory? */
5669 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5670 && !(Walk.fEffective & X86_PTE_RW)
5671 && ( ( IEM_GET_CPL(pVCpu) == 3
5672 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5673 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5674 {
5675 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5676 *pGCPhysMem = NIL_RTGCPHYS;
5677#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5678 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5679 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5680#endif
5681 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5682 }
5683
5684 /* Kernel memory accessed by userland? */
5685 if ( !(Walk.fEffective & X86_PTE_US)
5686 && IEM_GET_CPL(pVCpu) == 3
5687 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5688 {
5689 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5690 *pGCPhysMem = NIL_RTGCPHYS;
5691#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5692 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5693 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5694#endif
5695 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5696 }
5697
5698 /* Executing non-executable memory? */
5699 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5700 && (Walk.fEffective & X86_PTE_PAE_NX)
5701 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5702 {
5703 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5704 *pGCPhysMem = NIL_RTGCPHYS;
5705#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5706 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5707 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5708#endif
5709 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5710 VERR_ACCESS_DENIED);
5711 }
5712 }
5713
5714 /*
5715 * Set the dirty / access flags.
5716      * ASSUMES this is set when the address is translated rather than on commit...
5717 */
5718 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5719 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5720 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5721 {
5722 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5723 AssertRC(rc2);
5724 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5725 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5726 }
5727
5728 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5729 *pGCPhysMem = GCPhys;
5730 return VINF_SUCCESS;
5731}
5732
5733#if 0 /*unused*/
5734/**
5735 * Looks up a memory mapping entry.
5736 *
5737 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5739 * @param pvMem The memory address.
5740 * @param fAccess The access to.
5741 */
5742DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5743{
5744 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5745 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5746 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5747 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5748 return 0;
5749 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5750 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5751 return 1;
5752 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5753 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5754 return 2;
5755 return VERR_NOT_FOUND;
5756}
5757#endif
5758
5759/**
5760 * Finds a free memmap entry when using iNextMapping doesn't work.
5761 *
5762 * @returns Memory mapping index, 1024 on failure.
5763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5764 */
5765static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5766{
5767 /*
5768 * The easy case.
5769 */
5770 if (pVCpu->iem.s.cActiveMappings == 0)
5771 {
5772 pVCpu->iem.s.iNextMapping = 1;
5773 return 0;
5774 }
5775
5776 /* There should be enough mappings for all instructions. */
5777 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5778
5779 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5780 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5781 return i;
5782
5783 AssertFailedReturn(1024);
5784}
5785
5786
5787/**
5788 * Commits a bounce buffer that needs writing back and unmaps it.
5789 *
5790 * @returns Strict VBox status code.
5791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5792 * @param iMemMap The index of the buffer to commit.
5793 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5794 * Always false in ring-3, obviously.
5795 */
5796static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5797{
5798 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5799 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5800#ifdef IN_RING3
5801 Assert(!fPostponeFail);
5802 RT_NOREF_PV(fPostponeFail);
5803#endif
5804
5805 /*
5806 * Do the writing.
5807 */
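    /* A bounce buffered access may span two physical pages, so it is written back in up to two
       chunks: cbFirst bytes to GCPhysFirst and, when non-zero, cbSecond bytes to GCPhysSecond. */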
5808 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5809 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5810 {
5811 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5812 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5813 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5814 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5815 {
5816 /*
5817 * Carefully and efficiently dealing with access handler return
5818              * codes makes this a little bloated.
5819 */
5820 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5821 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5822 pbBuf,
5823 cbFirst,
5824 PGMACCESSORIGIN_IEM);
5825 if (rcStrict == VINF_SUCCESS)
5826 {
5827 if (cbSecond)
5828 {
5829 rcStrict = PGMPhysWrite(pVM,
5830 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5831 pbBuf + cbFirst,
5832 cbSecond,
5833 PGMACCESSORIGIN_IEM);
5834 if (rcStrict == VINF_SUCCESS)
5835 { /* nothing */ }
5836 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5837 {
5838 LogEx(LOG_GROUP_IEM,
5839 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5840 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5841 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5842 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5843 }
5844#ifndef IN_RING3
5845 else if (fPostponeFail)
5846 {
5847 LogEx(LOG_GROUP_IEM,
5848 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5849 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5850 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5851 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5852 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5853 return iemSetPassUpStatus(pVCpu, rcStrict);
5854 }
5855#endif
5856 else
5857 {
5858 LogEx(LOG_GROUP_IEM,
5859 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5860 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5861 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5862 return rcStrict;
5863 }
5864 }
5865 }
5866 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5867 {
5868 if (!cbSecond)
5869 {
5870 LogEx(LOG_GROUP_IEM,
5871 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5872 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5873 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5874 }
5875 else
5876 {
5877 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5878 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5879 pbBuf + cbFirst,
5880 cbSecond,
5881 PGMACCESSORIGIN_IEM);
5882 if (rcStrict2 == VINF_SUCCESS)
5883 {
5884 LogEx(LOG_GROUP_IEM,
5885 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5886 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5887 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5888 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5889 }
5890 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5891 {
5892 LogEx(LOG_GROUP_IEM,
5893 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5894 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5895 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5896 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5897 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5898 }
5899#ifndef IN_RING3
5900 else if (fPostponeFail)
5901 {
5902 LogEx(LOG_GROUP_IEM,
5903 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5904 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5905 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5906 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5907 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5908 return iemSetPassUpStatus(pVCpu, rcStrict);
5909 }
5910#endif
5911 else
5912 {
5913 LogEx(LOG_GROUP_IEM,
5914 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5915 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5916 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5917 return rcStrict2;
5918 }
5919 }
5920 }
5921#ifndef IN_RING3
5922 else if (fPostponeFail)
5923 {
5924 LogEx(LOG_GROUP_IEM,
5925 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5926 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5927 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
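                /* The first write did not complete here, so the whole bounce buffer
                   (both halves when a second page is involved) must be redone from
                   ring-3; flag the pending parts and raise VMCPU_FF_IEM accordingly. */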
5928 if (!cbSecond)
5929 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5930 else
5931 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5932 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5933 return iemSetPassUpStatus(pVCpu, rcStrict);
5934 }
5935#endif
5936 else
5937 {
5938 LogEx(LOG_GROUP_IEM,
5939 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5940 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5941 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5942 return rcStrict;
5943 }
5944 }
5945 else
5946 {
5947 /*
5948 * No access handlers, much simpler.
5949 */
5950 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5951 if (RT_SUCCESS(rc))
5952 {
5953 if (cbSecond)
5954 {
5955 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5956 if (RT_SUCCESS(rc))
5957 { /* likely */ }
5958 else
5959 {
5960 LogEx(LOG_GROUP_IEM,
5961 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5962 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5963 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5964 return rc;
5965 }
5966 }
5967 }
5968 else
5969 {
5970 LogEx(LOG_GROUP_IEM,
5971 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5972 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5973 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5974 return rc;
5975 }
5976 }
5977 }
5978
5979#if defined(IEM_LOG_MEMORY_WRITES)
5980 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5981 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5982 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5983 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5984 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5985 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5986
5987 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5988 g_cbIemWrote = cbWrote;
5989 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5990#endif
5991
5992 /*
5993 * Free the mapping entry.
5994 */
5995 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5996 Assert(pVCpu->iem.s.cActiveMappings != 0);
5997 pVCpu->iem.s.cActiveMappings--;
5998 return VINF_SUCCESS;
5999}
6000
6001
6002/**
6003 * iemMemMap worker that deals with a request crossing pages.
6004 */
6005static VBOXSTRICTRC
6006iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6007 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6008{
6009 Assert(cbMem <= GUEST_PAGE_SIZE);
6010
6011 /*
6012 * Do the address translations.
6013 */
6014 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6015 RTGCPHYS GCPhysFirst;
6016 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6017 if (rcStrict != VINF_SUCCESS)
6018 return rcStrict;
6019 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6020
6021 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6022 RTGCPHYS GCPhysSecond;
6023 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6024 cbSecondPage, fAccess, &GCPhysSecond);
6025 if (rcStrict != VINF_SUCCESS)
6026 return rcStrict;
6027 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6028 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6029
6030 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6031
6032 /*
6033 * Read in the current memory content if it's a read, execute or partial
6034 * write access.
6035 */
6036 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6037
6038 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6039 {
6040 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6041 {
6042 /*
 6043 * Must carefully deal with access handler status codes here, which
 6044 * makes the code a bit bloated.
6045 */
6046 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6047 if (rcStrict == VINF_SUCCESS)
6048 {
6049 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6050 if (rcStrict == VINF_SUCCESS)
6051 { /*likely */ }
6052 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6053 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6054 else
6055 {
 6056 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6057 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6058 return rcStrict;
6059 }
6060 }
6061 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6062 {
6063 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6064 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6065 {
6066 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6067 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6068 }
6069 else
6070 {
6071 LogEx(LOG_GROUP_IEM,
6072 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
 6073 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6074 return rcStrict2;
6075 }
6076 }
6077 else
6078 {
 6079 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6080 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6081 return rcStrict;
6082 }
6083 }
6084 else
6085 {
6086 /*
 6087 * No informational status codes here, much more straightforward.
6088 */
6089 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6090 if (RT_SUCCESS(rc))
6091 {
6092 Assert(rc == VINF_SUCCESS);
6093 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6094 if (RT_SUCCESS(rc))
6095 Assert(rc == VINF_SUCCESS);
6096 else
6097 {
6098 LogEx(LOG_GROUP_IEM,
6099 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6100 return rc;
6101 }
6102 }
6103 else
6104 {
6105 LogEx(LOG_GROUP_IEM,
6106 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6107 return rc;
6108 }
6109 }
6110 }
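    /* In strict builds, poison the bounce buffer: 0xcc where nothing was read
       in (pure writes) and 0xaa beyond cbMem, so stale data is easy to spot. */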
6111#ifdef VBOX_STRICT
6112 else
6113 memset(pbBuf, 0xcc, cbMem);
6114 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6115 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6116#endif
6117 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6118
6119 /*
6120 * Commit the bounce buffer entry.
6121 */
6122 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6123 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6124 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6125 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6126 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6127 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6128 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6129 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6130 pVCpu->iem.s.cActiveMappings++;
6131
6132 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6133 *ppvMem = pbBuf;
6134 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6135 return VINF_SUCCESS;
6136}
6137
6138
6139/**
 6140 * iemMemMap worker that deals with iemMemPageMap failures.
6141 */
6142static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6143 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6144{
6145 /*
6146 * Filter out conditions we can handle and the ones which shouldn't happen.
6147 */
6148 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6149 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6150 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6151 {
6152 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6153 return rcMap;
6154 }
6155 pVCpu->iem.s.cPotentialExits++;
6156
6157 /*
6158 * Read in the current memory content if it's a read, execute or partial
6159 * write access.
6160 */
6161 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6162 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6163 {
6164 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6165 memset(pbBuf, 0xff, cbMem);
6166 else
6167 {
6168 int rc;
6169 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6170 {
6171 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6172 if (rcStrict == VINF_SUCCESS)
6173 { /* nothing */ }
6174 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6175 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6176 else
6177 {
6178 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6179 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6180 return rcStrict;
6181 }
6182 }
6183 else
6184 {
6185 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6186 if (RT_SUCCESS(rc))
6187 { /* likely */ }
6188 else
6189 {
6190 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6191 GCPhysFirst, rc));
6192 return rc;
6193 }
6194 }
6195 }
6196 }
6197#ifdef VBOX_STRICT
6198 else
6199 memset(pbBuf, 0xcc, cbMem);
6202 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6203 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6204#endif
6205
6206 /*
6207 * Commit the bounce buffer entry.
6208 */
6209 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6210 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6211 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6212 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6213 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6214 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6215 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6216 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6217 pVCpu->iem.s.cActiveMappings++;
6218
6219 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6220 *ppvMem = pbBuf;
6221 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6222 return VINF_SUCCESS;
6223}
6224
6225
6226
6227/**
6228 * Maps the specified guest memory for the given kind of access.
6229 *
6230 * This may be using bounce buffering of the memory if it's crossing a page
6231 * boundary or if there is an access handler installed for any of it. Because
6232 * of lock prefix guarantees, we're in for some extra clutter when this
6233 * happens.
6234 *
6235 * This may raise a \#GP, \#SS, \#PF or \#AC.
6236 *
6237 * @returns VBox strict status code.
6238 *
6239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6240 * @param ppvMem Where to return the pointer to the mapped memory.
6241 * @param pbUnmapInfo Where to return unmap info to be passed to
6242 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6243 * done.
6244 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6245 * 8, 12, 16, 32 or 512. When used by string operations
6246 * it can be up to a page.
6247 * @param iSegReg The index of the segment register to use for this
6248 * access. The base and limits are checked. Use UINT8_MAX
6249 * to indicate that no segmentation is required (for IDT,
6250 * GDT and LDT accesses).
6251 * @param GCPtrMem The address of the guest memory.
6252 * @param fAccess How the memory is being accessed. The
6253 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6254 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6255 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6256 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6257 * set.
6258 * @param uAlignCtl Alignment control:
6259 * - Bits 15:0 is the alignment mask.
6260 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6261 * IEM_MEMMAP_F_ALIGN_SSE, and
6262 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6263 * Pass zero to skip alignment.
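 *
 * @par Example
 *      A minimal caller sketch (hypothetical locals, error handling trimmed),
 *      modelled on the iemMemFetchDataU32_ZX_U64 helper further down in this
 *      file, using the natural alignment mask for a 4 byte data read:
 * @code
 *      uint8_t          bUnmapInfo;
 *      uint32_t const  *pu32Src;
 *      VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
 *                                  IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *      if (rc == VINF_SUCCESS)
 *      {
 *          uint32_t const uValue = *pu32Src;
 *          rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *      }
 * @endcode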
6264 */
6265VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6266 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6267{
6268 /*
6269 * Check the input and figure out which mapping entry to use.
6270 */
6271 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6272 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6273 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6274 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6275 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6276
6277 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6278 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6279 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6280 {
6281 iMemMap = iemMemMapFindFree(pVCpu);
6282 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6283 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6284 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6285 pVCpu->iem.s.aMemMappings[2].fAccess),
6286 VERR_IEM_IPE_9);
6287 }
6288
6289 /*
6290 * Map the memory, checking that we can actually access it. If something
6291 * slightly complicated happens, fall back on bounce buffering.
6292 */
6293 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6294 if (rcStrict == VINF_SUCCESS)
6295 { /* likely */ }
6296 else
6297 return rcStrict;
6298
6299 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6300 { /* likely */ }
6301 else
6302 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6303
6304 /*
6305 * Alignment check.
6306 */
6307 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6308 { /* likelyish */ }
6309 else
6310 {
6311 /* Misaligned access. */
6312 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6313 {
6314 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6315 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6316 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6317 {
6318 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6319
6320 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6321 return iemRaiseAlignmentCheckException(pVCpu);
6322 }
6323 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6324 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6325 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6326 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6327 * that's what FXSAVE does on a 10980xe. */
6328 && iemMemAreAlignmentChecksEnabled(pVCpu))
6329 return iemRaiseAlignmentCheckException(pVCpu);
6330 else
6331 return iemRaiseGeneralProtectionFault0(pVCpu);
6332 }
6333
6334#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
 6335 /* If the access is atomic there are host platform alignment restrictions
 6336        we need to conform to. */
6337 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6338# if defined(RT_ARCH_AMD64)
6339 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6340# elif defined(RT_ARCH_ARM64)
6341 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6342# else
6343# error port me
6344# endif
6345 )
6346 { /* okay */ }
6347 else
6348 {
6349 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6350 pVCpu->iem.s.cMisalignedAtomics += 1;
6351 return VINF_EM_EMULATE_SPLIT_LOCK;
6352 }
6353#endif
6354 }
6355
6356#ifdef IEM_WITH_DATA_TLB
6357 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6358
6359 /*
6360 * Get the TLB entry for this page.
6361 */
6362 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6363 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6364 if (pTlbe->uTag == uTag)
6365 {
6366# ifdef VBOX_WITH_STATISTICS
6367 pVCpu->iem.s.DataTlb.cTlbHits++;
6368# endif
6369 }
6370 else
6371 {
6372 pVCpu->iem.s.DataTlb.cTlbMisses++;
6373 PGMPTWALK Walk;
6374 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6375 if (RT_FAILURE(rc))
6376 {
6377 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6378# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6379 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6380 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6381# endif
6382 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6383 }
6384
6385 Assert(Walk.fSucceeded);
6386 pTlbe->uTag = uTag;
6387 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6388 pTlbe->GCPhys = Walk.GCPhys;
6389 pTlbe->pbMappingR3 = NULL;
6390 }
6391
6392 /*
6393 * Check TLB page table level access flags.
6394 */
6395 /* If the page is either supervisor only or non-writable, we need to do
6396 more careful access checks. */
6397 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6398 {
6399 /* Write to read only memory? */
6400 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6401 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6402 && ( ( IEM_GET_CPL(pVCpu) == 3
6403 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6404 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6405 {
6406 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6407# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6408 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6409 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6410# endif
6411 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6412 }
6413
6414 /* Kernel memory accessed by userland? */
6415 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6416 && IEM_GET_CPL(pVCpu) == 3
6417 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6418 {
6419 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6420# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6421 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6422 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6423# endif
6424 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6425 }
6426 }
6427
6428 /*
6429 * Set the dirty / access flags.
6430 * ASSUMES this is set when the address is translated rather than on commit...
6431 */
6432 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6433 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6434 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6435 {
6436 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6437 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6438 AssertRC(rc2);
6439 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6440 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6441 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6442 }
6443
6444 /*
6445 * Look up the physical page info if necessary.
6446 */
6447 uint8_t *pbMem = NULL;
6448 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6449# ifdef IN_RING3
6450 pbMem = pTlbe->pbMappingR3;
6451# else
6452 pbMem = NULL;
6453# endif
6454 else
6455 {
6456 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6457 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6458 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6459 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6460 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6461 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6462 { /* likely */ }
6463 else
6464 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6465 pTlbe->pbMappingR3 = NULL;
6466 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6467 | IEMTLBE_F_NO_MAPPINGR3
6468 | IEMTLBE_F_PG_NO_READ
6469 | IEMTLBE_F_PG_NO_WRITE
6470 | IEMTLBE_F_PG_UNASSIGNED
6471 | IEMTLBE_F_PG_CODE_PAGE);
6472 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6473 &pbMem, &pTlbe->fFlagsAndPhysRev);
6474 AssertRCReturn(rc, rc);
6475# ifdef IN_RING3
6476 pTlbe->pbMappingR3 = pbMem;
6477# endif
6478 }
6479
6480 /*
6481 * Check the physical page level access and mapping.
6482 */
6483 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6484 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6485 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6486 { /* probably likely */ }
6487 else
6488 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6489 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6490 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6491 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6492 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6493 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6494
6495 if (pbMem)
6496 {
6497 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6498 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6499 fAccess |= IEM_ACCESS_NOT_LOCKED;
6500 }
6501 else
6502 {
6503 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6504 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6505 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6506 if (rcStrict != VINF_SUCCESS)
6507 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6508 }
6509
6510 void * const pvMem = pbMem;
6511
6512 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6513 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6514 if (fAccess & IEM_ACCESS_TYPE_READ)
6515 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6516
6517#else /* !IEM_WITH_DATA_TLB */
6518
6519 RTGCPHYS GCPhysFirst;
6520 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6521 if (rcStrict != VINF_SUCCESS)
6522 return rcStrict;
6523
6524 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6525 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6526 if (fAccess & IEM_ACCESS_TYPE_READ)
6527 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6528
6529 void *pvMem;
6530 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6531 if (rcStrict != VINF_SUCCESS)
6532 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6533
6534#endif /* !IEM_WITH_DATA_TLB */
6535
6536 /*
6537 * Fill in the mapping table entry.
6538 */
6539 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6540 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6541 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6542 pVCpu->iem.s.cActiveMappings += 1;
6543
6544 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6545 *ppvMem = pvMem;
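    /* Unmap info layout: bits 2:0 = mapping table index, bit 3 = valid marker,
       bits 7:4 = the IEM_ACCESS_TYPE_XXX bits (see the AssertCompile checks below). */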
6546 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6547 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6548 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6549
6550 return VINF_SUCCESS;
6551}
6552
6553
6554/**
6555 * Commits the guest memory if bounce buffered and unmaps it.
6556 *
6557 * @returns Strict VBox status code.
6558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6559 * @param bUnmapInfo Unmap info set by iemMemMap.
6560 */
6561VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6562{
6563 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6564 AssertMsgReturn( (bUnmapInfo & 0x08)
6565 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6566 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6567 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6568 VERR_NOT_FOUND);
6569
6570 /* If it's bounce buffered, we may need to write back the buffer. */
6571 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6572 {
6573 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6574 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6575 }
6576 /* Otherwise unlock it. */
6577 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6578 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6579
6580 /* Free the entry. */
6581 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6582 Assert(pVCpu->iem.s.cActiveMappings != 0);
6583 pVCpu->iem.s.cActiveMappings--;
6584 return VINF_SUCCESS;
6585}
6586
6587
6588/**
6589 * Rolls back the guest memory (conceptually only) and unmaps it.
6590 *
6591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6592 * @param bUnmapInfo Unmap info set by iemMemMap.
6593 */
6594void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6595{
6596 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6597 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6598 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6599 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6600 == ((unsigned)bUnmapInfo >> 4),
6601 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6602
6603 /* Unlock it if necessary. */
6604 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6605 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6606
6607 /* Free the entry. */
6608 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6609 Assert(pVCpu->iem.s.cActiveMappings != 0);
6610 pVCpu->iem.s.cActiveMappings--;
6611}
6612
6613#ifdef IEM_WITH_SETJMP
6614
6615/**
6616 * Maps the specified guest memory for the given kind of access, longjmp on
6617 * error.
6618 *
6619 * This may be using bounce buffering of the memory if it's crossing a page
6620 * boundary or if there is an access handler installed for any of it. Because
6621 * of lock prefix guarantees, we're in for some extra clutter when this
6622 * happens.
6623 *
6624 * This may raise a \#GP, \#SS, \#PF or \#AC.
6625 *
6626 * @returns Pointer to the mapped memory.
6627 *
6628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 6629 * @param pbUnmapInfo Where to return unmap info to be passed to
6630 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6631 * iemMemCommitAndUnmapWoSafeJmp,
6632 * iemMemCommitAndUnmapRoSafeJmp,
6633 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6634 * when done.
6635 * @param cbMem The number of bytes to map. This is usually 1,
6636 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6637 * string operations it can be up to a page.
6638 * @param iSegReg The index of the segment register to use for
6639 * this access. The base and limits are checked.
6640 * Use UINT8_MAX to indicate that no segmentation
6641 * is required (for IDT, GDT and LDT accesses).
6642 * @param GCPtrMem The address of the guest memory.
6643 * @param fAccess How the memory is being accessed. The
6644 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6645 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6646 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6647 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6648 * set.
6649 * @param uAlignCtl Alignment control:
6650 * - Bits 15:0 is the alignment mask.
6651 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6652 * IEM_MEMMAP_F_ALIGN_SSE, and
6653 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6654 * Pass zero to skip alignment.
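 *
 * @par Example
 *      A minimal caller sketch (hypothetical locals, pu128Dst being the
 *      caller's destination), modelled on the iemMemFetchDataU128AlignedSseJmp
 *      helper further down in this file.  Failures longjmp, so there is no
 *      status code to check:
 * @code
 *      uint8_t      bUnmapInfo;
 *      PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
 *                                                         IEM_ACCESS_DATA_R,
 *                                                         (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
 *      pu128Dst->au64[0] = pu128Src->au64[0];
 *      pu128Dst->au64[1] = pu128Src->au64[1];
 *      iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
 * @endcode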
6655 */
6656void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6657 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6658{
6659 /*
6660 * Check the input, check segment access and adjust address
6661 * with segment base.
6662 */
6663 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6664 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6665 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6666
6667 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6668 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6669 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6670
6671 /*
6672 * Alignment check.
6673 */
6674 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6675 { /* likelyish */ }
6676 else
6677 {
6678 /* Misaligned access. */
6679 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6680 {
6681 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6682 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6683 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6684 {
6685 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6686
6687 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6688 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6689 }
6690 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6691 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6692 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6693 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6694 * that's what FXSAVE does on a 10980xe. */
6695 && iemMemAreAlignmentChecksEnabled(pVCpu))
6696 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6697 else
6698 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6699 }
6700
6701#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
 6702 /* If the access is atomic there are host platform alignment restrictions
 6703        we need to conform to. */
6704 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6705# if defined(RT_ARCH_AMD64)
6706 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6707# elif defined(RT_ARCH_ARM64)
6708 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6709# else
6710# error port me
6711# endif
6712 )
6713 { /* okay */ }
6714 else
6715 {
6716 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6717 pVCpu->iem.s.cMisalignedAtomics += 1;
6718 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
6719 }
6720#endif
6721 }
6722
6723 /*
6724 * Figure out which mapping entry to use.
6725 */
6726 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6727 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6728 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6729 {
6730 iMemMap = iemMemMapFindFree(pVCpu);
6731 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6732 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6733 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6734 pVCpu->iem.s.aMemMappings[2].fAccess),
6735 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6736 }
6737
6738 /*
6739 * Crossing a page boundary?
6740 */
6741 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6742 { /* No (likely). */ }
6743 else
6744 {
6745 void *pvMem;
6746 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6747 if (rcStrict == VINF_SUCCESS)
6748 return pvMem;
6749 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6750 }
6751
6752#ifdef IEM_WITH_DATA_TLB
6753 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6754
6755 /*
6756 * Get the TLB entry for this page.
6757 */
6758 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6759 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6760 if (pTlbe->uTag == uTag)
6761 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6762 else
6763 {
6764 pVCpu->iem.s.DataTlb.cTlbMisses++;
6765 PGMPTWALK Walk;
6766 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6767 if (RT_FAILURE(rc))
6768 {
6769 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6770# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6771 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6772 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6773# endif
6774 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6775 }
6776
6777 Assert(Walk.fSucceeded);
6778 pTlbe->uTag = uTag;
6779 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6780 pTlbe->GCPhys = Walk.GCPhys;
6781 pTlbe->pbMappingR3 = NULL;
6782 }
6783
6784 /*
6785 * Check the flags and physical revision.
6786 */
6787 /** @todo make the caller pass these in with fAccess. */
6788 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6789 ? IEMTLBE_F_PT_NO_USER : 0;
6790 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6791 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6792 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6793 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6794 ? IEMTLBE_F_PT_NO_WRITE : 0)
6795 : 0;
6796 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6797 uint8_t *pbMem = NULL;
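    /* A single compare covers everything: the entry only matches when the
       physical revision is current and none of the disqualifying no-accessed /
       no-read / no-write-or-dirty / no-user bits are set in fFlagsAndPhysRev. */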
6798 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6799 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6800# ifdef IN_RING3
6801 pbMem = pTlbe->pbMappingR3;
6802# else
6803 pbMem = NULL;
6804# endif
6805 else
6806 {
6807 /*
6808 * Okay, something isn't quite right or needs refreshing.
6809 */
6810 /* Write to read only memory? */
6811 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6812 {
6813 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6814# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6815 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6816 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6817# endif
6818 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6819 }
6820
6821 /* Kernel memory accessed by userland? */
6822 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6823 {
6824 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6825# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6826 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6827 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6828# endif
6829 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6830 }
6831
6832 /* Set the dirty / access flags.
6833 ASSUMES this is set when the address is translated rather than on commit... */
6834 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6835 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6836 {
6837 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6838 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6839 AssertRC(rc2);
6840 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6841 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6842 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6843 }
6844
6845 /*
6846 * Check if the physical page info needs updating.
6847 */
6848 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6849# ifdef IN_RING3
6850 pbMem = pTlbe->pbMappingR3;
6851# else
6852 pbMem = NULL;
6853# endif
6854 else
6855 {
6856 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6857 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6858 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6859 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6860 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6861 pTlbe->pbMappingR3 = NULL;
6862 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6863 | IEMTLBE_F_NO_MAPPINGR3
6864 | IEMTLBE_F_PG_NO_READ
6865 | IEMTLBE_F_PG_NO_WRITE
6866 | IEMTLBE_F_PG_UNASSIGNED
6867 | IEMTLBE_F_PG_CODE_PAGE);
6868 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6869 &pbMem, &pTlbe->fFlagsAndPhysRev);
6870 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6871# ifdef IN_RING3
6872 pTlbe->pbMappingR3 = pbMem;
6873# endif
6874 }
6875
6876 /*
6877 * Check the physical page level access and mapping.
6878 */
6879 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6880 { /* probably likely */ }
6881 else
6882 {
6883 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6884 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6885 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6886 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6887 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6888 if (rcStrict == VINF_SUCCESS)
6889 return pbMem;
6890 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6891 }
6892 }
6893 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6894
6895 if (pbMem)
6896 {
6897 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6898 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6899 fAccess |= IEM_ACCESS_NOT_LOCKED;
6900 }
6901 else
6902 {
6903 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6904 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6905 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6906 if (rcStrict == VINF_SUCCESS)
6907 {
6908 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6909 return pbMem;
6910 }
6911 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6912 }
6913
6914 void * const pvMem = pbMem;
6915
6916 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6917 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6918 if (fAccess & IEM_ACCESS_TYPE_READ)
6919 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6920
6921#else /* !IEM_WITH_DATA_TLB */
6922
6923
6924 RTGCPHYS GCPhysFirst;
6925 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6926 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6927 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6928
6929 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6930 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6931 if (fAccess & IEM_ACCESS_TYPE_READ)
6932 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6933
6934 void *pvMem;
6935 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6936 if (rcStrict == VINF_SUCCESS)
6937 { /* likely */ }
6938 else
6939 {
6940 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6941 if (rcStrict == VINF_SUCCESS)
6942 return pvMem;
6943 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6944 }
6945
6946#endif /* !IEM_WITH_DATA_TLB */
6947
6948 /*
6949 * Fill in the mapping table entry.
6950 */
6951 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6952 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6953 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6954 pVCpu->iem.s.cActiveMappings++;
6955
6956 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6957
6958 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6959 return pvMem;
6960}
6961
6962
6963/**
6964 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6965 *
6966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 6967 * @param bUnmapInfo Unmap info set by iemMemMap or iemMemMapJmp.
6969 */
6970void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6971{
6972 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6973 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6974 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6975 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6976 == ((unsigned)bUnmapInfo >> 4),
6977 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6978
6979 /* If it's bounce buffered, we may need to write back the buffer. */
6980 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6981 {
6982 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6983 {
6984 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6985 if (rcStrict == VINF_SUCCESS)
6986 return;
6987 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6988 }
6989 }
6990 /* Otherwise unlock it. */
6991 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6992 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6993
6994 /* Free the entry. */
6995 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6996 Assert(pVCpu->iem.s.cActiveMappings != 0);
6997 pVCpu->iem.s.cActiveMappings--;
6998}
6999
7000
7001/** Fallback for iemMemCommitAndUnmapRwJmp. */
7002void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7003{
7004 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7005 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7006}
7007
7008
7009/** Fallback for iemMemCommitAndUnmapAtJmp. */
7010void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7011{
7012 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7013 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7014}
7015
7016
7017/** Fallback for iemMemCommitAndUnmapWoJmp. */
7018void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7019{
7020 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7021 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7022}
7023
7024
7025/** Fallback for iemMemCommitAndUnmapRoJmp. */
7026void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7027{
7028 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7029 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7030}
7031
7032
7033/** Fallback for iemMemRollbackAndUnmapWo. */
7034void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7035{
7036 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7037 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7038}
7039
7040#endif /* IEM_WITH_SETJMP */
7041
7042#ifndef IN_RING3
7043/**
 7044 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 7045 * buffer part runs into trouble, the commit is postponed to ring-3 (sets VMCPU_FF_IEM and the pending-write access bits).
7046 *
7047 * Allows the instruction to be completed and retired, while the IEM user will
7048 * return to ring-3 immediately afterwards and do the postponed writes there.
7049 *
7050 * @returns VBox status code (no strict statuses). Caller must check
7051 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7053 * @param bUnmapInfo Unmap info set by iemMemMap.
7055 */
7056VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7057{
7058 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7059 AssertMsgReturn( (bUnmapInfo & 0x08)
7060 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7061 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7062 == ((unsigned)bUnmapInfo >> 4),
7063 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7064 VERR_NOT_FOUND);
7065
7066 /* If it's bounce buffered, we may need to write back the buffer. */
7067 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7068 {
7069 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7070 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7071 }
7072 /* Otherwise unlock it. */
7073 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7074 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7075
7076 /* Free the entry. */
7077 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7078 Assert(pVCpu->iem.s.cActiveMappings != 0);
7079 pVCpu->iem.s.cActiveMappings--;
7080 return VINF_SUCCESS;
7081}
7082#endif
7083
7084
7085/**
 7086 * Rolls back mappings, releasing page locks and such.
7087 *
7088 * The caller shall only call this after checking cActiveMappings.
7089 *
7090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7091 */
7092void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7093{
7094 Assert(pVCpu->iem.s.cActiveMappings > 0);
7095
7096 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7097 while (iMemMap-- > 0)
7098 {
7099 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7100 if (fAccess != IEM_ACCESS_INVALID)
7101 {
7102 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7103 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7104 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7105 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7106 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7107 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7108 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7109 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7110 pVCpu->iem.s.cActiveMappings--;
7111 }
7112 }
7113}
7114
7115
7116/*
7117 * Instantiate R/W templates.
7118 */
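/* Each inclusion of IEMAllMemRWTmpl.cpp.h below expands the fetch/store workers
   for the current TMPL_MEM_TYPE, named after TMPL_MEM_FN_SUFF (e.g. the uint16_t
   block yields iemMemFetchDataU16 and friends), plus the matching stack push/pop
   workers while TMPL_MEM_WITH_STACK is defined. */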
7119#define TMPL_MEM_WITH_STACK
7120
7121#define TMPL_MEM_TYPE uint8_t
7122#define TMPL_MEM_FN_SUFF U8
7123#define TMPL_MEM_FMT_TYPE "%#04x"
7124#define TMPL_MEM_FMT_DESC "byte"
7125#include "IEMAllMemRWTmpl.cpp.h"
7126
7127#define TMPL_MEM_TYPE uint16_t
7128#define TMPL_MEM_FN_SUFF U16
7129#define TMPL_MEM_FMT_TYPE "%#06x"
7130#define TMPL_MEM_FMT_DESC "word"
7131#include "IEMAllMemRWTmpl.cpp.h"
7132
7133#define TMPL_WITH_PUSH_SREG
7134#define TMPL_MEM_TYPE uint32_t
7135#define TMPL_MEM_FN_SUFF U32
7136#define TMPL_MEM_FMT_TYPE "%#010x"
7137#define TMPL_MEM_FMT_DESC "dword"
7138#include "IEMAllMemRWTmpl.cpp.h"
7139#undef TMPL_WITH_PUSH_SREG
7140
7141#define TMPL_MEM_TYPE uint64_t
7142#define TMPL_MEM_FN_SUFF U64
7143#define TMPL_MEM_FMT_TYPE "%#018RX64"
7144#define TMPL_MEM_FMT_DESC "qword"
7145#include "IEMAllMemRWTmpl.cpp.h"
7146
7147#undef TMPL_MEM_WITH_STACK
7148
7149#define TMPL_MEM_TYPE uint64_t
7150#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7151#define TMPL_MEM_FN_SUFF U64AlignedU128
7152#define TMPL_MEM_FMT_TYPE "%#018RX64"
7153#define TMPL_MEM_FMT_DESC "qword"
7154#include "IEMAllMemRWTmpl.cpp.h"
7155
7156/* See IEMAllMemRWTmplInline.cpp.h */
7157#define TMPL_MEM_BY_REF
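/* From here on the types are larger, so the template workers pass them by
   reference rather than by value (see the TMPL_MEM_BY_REF handling in the
   template headers). */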
7158
7159#define TMPL_MEM_TYPE RTFLOAT80U
7160#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7161#define TMPL_MEM_FN_SUFF R80
7162#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7163#define TMPL_MEM_FMT_DESC "tword"
7164#include "IEMAllMemRWTmpl.cpp.h"
7165
7166#define TMPL_MEM_TYPE RTPBCD80U
7167#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7168#define TMPL_MEM_FN_SUFF D80
7169#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7170#define TMPL_MEM_FMT_DESC "tword"
7171#include "IEMAllMemRWTmpl.cpp.h"
7172
7173#define TMPL_MEM_TYPE RTUINT128U
7174#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7175#define TMPL_MEM_FN_SUFF U128
7176#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7177#define TMPL_MEM_FMT_DESC "dqword"
7178#include "IEMAllMemRWTmpl.cpp.h"
7179
7180#define TMPL_MEM_TYPE RTUINT128U
7181#define TMPL_MEM_TYPE_ALIGN 0
7182#define TMPL_MEM_FN_SUFF U128NoAc
7183#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7184#define TMPL_MEM_FMT_DESC "dqword"
7185#include "IEMAllMemRWTmpl.cpp.h"
7186
7187/**
7188 * Fetches a data dword and zero extends it to a qword.
7189 *
7190 * @returns Strict VBox status code.
7191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7192 * @param pu64Dst Where to return the qword.
7193 * @param iSegReg The index of the segment register to use for
7194 * this access. The base and limits are checked.
7195 * @param GCPtrMem The address of the guest memory.
7196 */
7197VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7198{
7199 /* The lazy approach for now... */
7200 uint8_t bUnmapInfo;
7201 uint32_t const *pu32Src;
7202 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7203 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7204 if (rc == VINF_SUCCESS)
7205 {
7206 *pu64Dst = *pu32Src;
7207 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7208 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7209 }
7210 return rc;
7211}
7212
7213
7214#ifdef SOME_UNUSED_FUNCTION
7215/**
7216 * Fetches a data dword and sign extends it to a qword.
7217 *
7218 * @returns Strict VBox status code.
7219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7220 * @param pu64Dst Where to return the sign extended value.
7221 * @param iSegReg The index of the segment register to use for
7222 * this access. The base and limits are checked.
7223 * @param GCPtrMem The address of the guest memory.
7224 */
7225VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7226{
7227 /* The lazy approach for now... */
7228 uint8_t bUnmapInfo;
7229 int32_t const *pi32Src;
7230 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7231 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7232 if (rc == VINF_SUCCESS)
7233 {
7234 *pu64Dst = *pi32Src;
7235 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7236 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7237 }
7238#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7239 else
7240 *pu64Dst = 0;
7241#endif
7242 return rc;
7243}
7244#endif
7245
7246
7247/**
7248 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7249 * related.
7250 *
7251 * Raises \#GP(0) if not aligned.
7252 *
7253 * @returns Strict VBox status code.
7254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7255 * @param pu128Dst Where to return the dqword.
7256 * @param iSegReg The index of the segment register to use for
7257 * this access. The base and limits are checked.
7258 * @param GCPtrMem The address of the guest memory.
7259 */
7260VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7261{
7262 /* The lazy approach for now... */
7263 uint8_t bUnmapInfo;
7264 PCRTUINT128U pu128Src;
7265 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
7266 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7267 if (rc == VINF_SUCCESS)
7268 {
7269 pu128Dst->au64[0] = pu128Src->au64[0];
7270 pu128Dst->au64[1] = pu128Src->au64[1];
7271 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7272 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7273 }
7274 return rc;
7275}
7276
7277
7278#ifdef IEM_WITH_SETJMP
7279/**
7280 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7281 * related, longjmp on error.
7282 *
7283 * Raises \#GP(0) if not aligned.
7284 *
7285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7286 * @param pu128Dst Where to return the dqword.
7287 * @param iSegReg The index of the segment register to use for
7288 * this access. The base and limits are checked.
7289 * @param GCPtrMem The address of the guest memory.
7290 */
7291void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7292 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7293{
7294 /* The lazy approach for now... */
7295 uint8_t bUnmapInfo;
7296 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7297 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7298 pu128Dst->au64[0] = pu128Src->au64[0];
7299 pu128Dst->au64[1] = pu128Src->au64[1];
7300 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7301 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7302}
7303#endif
7304
7305
7306/**
7307 * Fetches a data oword (octo word), generally AVX related.
7308 *
7309 * @returns Strict VBox status code.
7310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7311 * @param pu256Dst Where to return the oword.
7312 * @param iSegReg The index of the segment register to use for
7313 * this access. The base and limits are checked.
7314 * @param GCPtrMem The address of the guest memory.
7315 */
7316VBOXSTRICTRC iemMemFetchDataU256NoAc(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7317{
7318 /* The lazy approach for now... */
7319 uint8_t bUnmapInfo;
7320 PCRTUINT256U pu256Src;
7321 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7322 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7323 if (rc == VINF_SUCCESS)
7324 {
7325 pu256Dst->au64[0] = pu256Src->au64[0];
7326 pu256Dst->au64[1] = pu256Src->au64[1];
7327 pu256Dst->au64[2] = pu256Src->au64[2];
7328 pu256Dst->au64[3] = pu256Src->au64[3];
7329 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7330 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7331 }
7332 return rc;
7333}
7334
7335
7336#ifdef IEM_WITH_SETJMP
7337/**
7338 * Fetches a data oword (octo word), generally AVX related.
7339 *
7340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7341 * @param pu256Dst Where to return the oword.
7342 * @param iSegReg The index of the segment register to use for
7343 * this access. The base and limits are checked.
7344 * @param GCPtrMem The address of the guest memory.
7345 */
7346void iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7347{
7348 /* The lazy approach for now... */
7349 uint8_t bUnmapInfo;
7350 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7351 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7352 pu256Dst->au64[0] = pu256Src->au64[0];
7353 pu256Dst->au64[1] = pu256Src->au64[1];
7354 pu256Dst->au64[2] = pu256Src->au64[2];
7355 pu256Dst->au64[3] = pu256Src->au64[3];
7356 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7357 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7358}
7359#endif
7360
7361
7362/**
7363 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7364 * related.
7365 *
7366 * Raises \#GP(0) if not aligned.
7367 *
7368 * @returns Strict VBox status code.
7369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7370 * @param pu256Dst Where to return the qqword.
7371 * @param iSegReg The index of the segment register to use for
7372 * this access. The base and limits are checked.
7373 * @param GCPtrMem The address of the guest memory.
7374 */
7375VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7376{
7377 /* The lazy approach for now... */
7378 uint8_t bUnmapInfo;
7379 PCRTUINT256U pu256Src;
7380 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7381 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7382 if (rc == VINF_SUCCESS)
7383 {
7384 pu256Dst->au64[0] = pu256Src->au64[0];
7385 pu256Dst->au64[1] = pu256Src->au64[1];
7386 pu256Dst->au64[2] = pu256Src->au64[2];
7387 pu256Dst->au64[3] = pu256Src->au64[3];
7388 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7389 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7390 }
7391 return rc;
7392}
7393
7394
7395#ifdef IEM_WITH_SETJMP
7396/**
7397 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7398 * related, longjmp on error.
7399 *
7400 * Raises \#GP(0) if not aligned.
7401 *
7402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7403 * @param pu256Dst Where to return the qqword.
7404 * @param iSegReg The index of the segment register to use for
7405 * this access. The base and limits are checked.
7406 * @param GCPtrMem The address of the guest memory.
7407 */
7408void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7409 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7410{
7411 /* The lazy approach for now... */
7412 uint8_t bUnmapInfo;
7413 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7414 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7415 pu256Dst->au64[0] = pu256Src->au64[0];
7416 pu256Dst->au64[1] = pu256Src->au64[1];
7417 pu256Dst->au64[2] = pu256Src->au64[2];
7418 pu256Dst->au64[3] = pu256Src->au64[3];
7419 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7420 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7421}
7422#endif
7423
7424
7425
7426/**
7427 * Fetches a descriptor register (lgdt, lidt).
7428 *
7429 * @returns Strict VBox status code.
7430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7431 * @param pcbLimit Where to return the limit.
7432 * @param pGCPtrBase Where to return the base.
7433 * @param iSegReg The index of the segment register to use for
7434 * this access. The base and limits are checked.
7435 * @param GCPtrMem The address of the guest memory.
7436 * @param enmOpSize The effective operand size.
7437 */
7438VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7439 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7440{
7441 /*
7442 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7443 * little special:
7444 * - The two reads are done separately.
7445 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7446 * - We suspect the 386 to actually commit the limit before the base in
7447 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7448 * don't try to emulate this eccentric behavior, because it's not well
7449 * enough understood and rather hard to trigger.
7450 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7451 */
7452 VBOXSTRICTRC rcStrict;
7453 if (IEM_IS_64BIT_CODE(pVCpu))
7454 {
7455 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7456 if (rcStrict == VINF_SUCCESS)
7457 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7458 }
7459 else
7460 {
7461 uint32_t uTmp = 0; /* (Visual C++ may warn: maybe used uninitialized) */
7462 if (enmOpSize == IEMMODE_32BIT)
7463 {
7464 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7465 {
7466 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7467 if (rcStrict == VINF_SUCCESS)
7468 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7469 }
7470 else
7471 {
7472 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7473 if (rcStrict == VINF_SUCCESS)
7474 {
7475 *pcbLimit = (uint16_t)uTmp;
7476 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7477 }
7478 }
7479 if (rcStrict == VINF_SUCCESS)
7480 *pGCPtrBase = uTmp;
7481 }
7482 else
7483 {
7484 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7485 if (rcStrict == VINF_SUCCESS)
7486 {
7487 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7488 if (rcStrict == VINF_SUCCESS)
7489 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7490 }
7491 }
7492 }
7493 return rcStrict;
7494}
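
/*
 * Illustrative sketch (not compiled): the pseudo-descriptor operand layout
 * read by iemMemFetchDataXdtr() above - a 16-bit limit immediately followed
 * by the base.  With a 16-bit operand size only the low 24 bits of the base
 * are used, which is what the UINT32_C(0x00ffffff) masking implements; in
 * 64-bit code the base is a full 8 bytes.  The type below is hypothetical
 * and only meant to picture the byte layout.
 */
#if 0
typedef struct IEMEXAMPLEXDTR /* hypothetical type, illustration only; the real code reads the fields separately */
{
    uint16_t cbLimit;   /* bytes 0..1: the limit */
    uint64_t uBase;     /* bytes 2..9: the base (bytes 2..5 outside 64-bit code, only 2..4 for a 16-bit operand) */
} IEMEXAMPLEXDTR;       /* note: would need packing; shown unpacked for brevity */
#endif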
7495
7496
7497/**
7498 * Stores a data dqword, SSE aligned.
7499 *
7500 * @returns Strict VBox status code.
7501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7502 * @param iSegReg The index of the segment register to use for
7503 * this access. The base and limits are checked.
7504 * @param GCPtrMem The address of the guest memory.
7505 * @param u128Value The value to store.
7506 */
7507VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7508{
7509 /* The lazy approach for now... */
7510 uint8_t bUnmapInfo;
7511 PRTUINT128U pu128Dst;
7512 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7513 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7514 if (rc == VINF_SUCCESS)
7515 {
7516 pu128Dst->au64[0] = u128Value.au64[0];
7517 pu128Dst->au64[1] = u128Value.au64[1];
7518 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7519 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7520 }
7521 return rc;
7522}
7523
7524
7525#ifdef IEM_WITH_SETJMP
7526/**
7527 * Stores a data dqword, SSE aligned, longjmp on error.
7528 *
7529 * Raises \#GP(0) if not aligned.
7530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7531 * @param iSegReg The index of the segment register to use for
7532 * this access. The base and limits are checked.
7533 * @param GCPtrMem The address of the guest memory.
7534 * @param u128Value The value to store.
7535 */
7536void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7537 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7538{
7539 /* The lazy approach for now... */
7540 uint8_t bUnmapInfo;
7541 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7542 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7543 pu128Dst->au64[0] = u128Value.au64[0];
7544 pu128Dst->au64[1] = u128Value.au64[1];
7545 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7546 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7547}
7548#endif
7549
7550
7551/**
7552 * Stores a data qqword.
7553 *
7554 * @returns Strict VBox status code.
7555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7556 * @param iSegReg The index of the segment register to use for
7557 * this access. The base and limits are checked.
7558 * @param GCPtrMem The address of the guest memory.
7559 * @param pu256Value Pointer to the value to store.
7560 */
7561VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7562{
7563 /* The lazy approach for now... */
7564 uint8_t bUnmapInfo;
7565 PRTUINT256U pu256Dst;
7566 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7567 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7568 if (rc == VINF_SUCCESS)
7569 {
7570 pu256Dst->au64[0] = pu256Value->au64[0];
7571 pu256Dst->au64[1] = pu256Value->au64[1];
7572 pu256Dst->au64[2] = pu256Value->au64[2];
7573 pu256Dst->au64[3] = pu256Value->au64[3];
7574 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7575 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7576 }
7577 return rc;
7578}
7579
7580
7581#ifdef IEM_WITH_SETJMP
7582/**
7583 * Stores a data qqword, longjmp on error.
7584 *
7585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7586 * @param iSegReg The index of the segment register to use for
7587 * this access. The base and limits are checked.
7588 * @param GCPtrMem The address of the guest memory.
7589 * @param pu256Value Pointer to the value to store.
7590 */
7591void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7592{
7593 /* The lazy approach for now... */
7594 uint8_t bUnmapInfo;
7595 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7596 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7597 pu256Dst->au64[0] = pu256Value->au64[0];
7598 pu256Dst->au64[1] = pu256Value->au64[1];
7599 pu256Dst->au64[2] = pu256Value->au64[2];
7600 pu256Dst->au64[3] = pu256Value->au64[3];
7601 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7602 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7603}
7604#endif
7605
7606
7607/**
7608 * Stores a data qqword.
7609 *
7610 * @returns Strict VBox status code.
7611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7612 * @param iSegReg The index of the segment register to use for
7613 * this access. The base and limits are checked.
7614 * @param GCPtrMem The address of the guest memory.
7615 * @param pu256Value Pointer to the value to store.
7616 */
7617VBOXSTRICTRC iemMemStoreDataU256NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7618{
7619 /* The lazy approach for now... */
7620 uint8_t bUnmapInfo;
7621 PRTUINT256U pu256Dst;
7622 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7623 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7624 if (rc == VINF_SUCCESS)
7625 {
7626 pu256Dst->au64[0] = pu256Value->au64[0];
7627 pu256Dst->au64[1] = pu256Value->au64[1];
7628 pu256Dst->au64[2] = pu256Value->au64[2];
7629 pu256Dst->au64[3] = pu256Value->au64[3];
7630 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7631 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7632 }
7633 return rc;
7634}
7635
7636
7637#ifdef IEM_WITH_SETJMP
7638/**
7639 * Stores a data qqword, longjmp on error.
7640 *
7641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7642 * @param iSegReg The index of the segment register to use for
7643 * this access. The base and limits are checked.
7644 * @param GCPtrMem The address of the guest memory.
7645 * @param pu256Value Pointer to the value to store.
7646 */
7647void iemMemStoreDataU256NoAcJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7648{
7649 /* The lazy approach for now... */
7650 uint8_t bUnmapInfo;
7651 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7652 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7653 pu256Dst->au64[0] = pu256Value->au64[0];
7654 pu256Dst->au64[1] = pu256Value->au64[1];
7655 pu256Dst->au64[2] = pu256Value->au64[2];
7656 pu256Dst->au64[3] = pu256Value->au64[3];
7657 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7658 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7659}
7660#endif
7661
7662
7663/**
7664 * Stores a data qqword, AVX aligned (raises \#GP(0) if not aligned).
7665 *
7666 * @returns Strict VBox status code.
7667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7668 * @param iSegReg The index of the segment register to use for
7669 * this access. The base and limits are checked.
7670 * @param GCPtrMem The address of the guest memory.
7671 * @param pu256Value Pointer to the value to store.
7672 */
7673VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7674{
7675 /* The lazy approach for now... */
7676 uint8_t bUnmapInfo;
7677 PRTUINT256U pu256Dst;
7678 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7679 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7680 if (rc == VINF_SUCCESS)
7681 {
7682 pu256Dst->au64[0] = pu256Value->au64[0];
7683 pu256Dst->au64[1] = pu256Value->au64[1];
7684 pu256Dst->au64[2] = pu256Value->au64[2];
7685 pu256Dst->au64[3] = pu256Value->au64[3];
7686 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7687 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7688 }
7689 return rc;
7690}
7691
7692
7693#ifdef IEM_WITH_SETJMP
7694/**
7695 * Stores a data qqword, AVX aligned, longjmp on error.
7696 *
7697 * Raises \#GP(0) if not aligned.
7698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7699 * @param iSegReg The index of the segment register to use for
7700 * this access. The base and limits are checked.
7701 * @param GCPtrMem The address of the guest memory.
7702 * @param pu256Value Pointer to the value to store.
7703 */
7704void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7705 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7706{
7707 /* The lazy approach for now... */
7708 uint8_t bUnmapInfo;
7709 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7710 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7711 pu256Dst->au64[0] = pu256Value->au64[0];
7712 pu256Dst->au64[1] = pu256Value->au64[1];
7713 pu256Dst->au64[2] = pu256Value->au64[2];
7714 pu256Dst->au64[3] = pu256Value->au64[3];
7715 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7716 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7717}
7718#endif
7719
7720
7721/**
7722 * Stores a descriptor register (sgdt, sidt).
7723 *
7724 * @returns Strict VBox status code.
7725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7726 * @param cbLimit The limit.
7727 * @param GCPtrBase The base address.
7728 * @param iSegReg The index of the segment register to use for
7729 * this access. The base and limits are checked.
7730 * @param GCPtrMem The address of the guest memory.
7731 */
7732VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7733{
7734 /*
7735 * The SIDT and SGDT instructions actually store the data using two
7736 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7737 * do not respond to operand size prefixes.
7738 */
7739 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7740 if (rcStrict == VINF_SUCCESS)
7741 {
7742 if (IEM_IS_16BIT_CODE(pVCpu))
7743 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7744 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7745 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7746 else if (IEM_IS_32BIT_CODE(pVCpu))
7747 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7748 else
7749 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7750 }
7751 return rcStrict;
7752}
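
/*
 * Note (illustration only):  for a 16-bit SGDT/SIDT on a 286 class target
 * the code above forces the top byte of the stored 32-bit base field to
 * 0xFF, i.e. only a 24-bit base plus an all-ones filler byte is written.
 */
#if 0
/* GCPtrBase = 0x00123456 on IEMTARGETCPU_286 or older  ->  operand bytes 2..5 = 56 34 12 FF */
#endif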
7753
7754
7755/**
7756 * Begin a special stack push (used by interrupts, exceptions and such).
7757 *
7758 * This will raise \#SS or \#PF if appropriate.
7759 *
7760 * @returns Strict VBox status code.
7761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7762 * @param cbMem The number of bytes to push onto the stack.
7763 * @param cbAlign The alignment mask (7, 3, 1).
7764 * @param ppvMem Where to return the pointer to the stack memory.
7765 * As with the other memory functions this could be
7766 * direct access or bounce buffered access, so
7767 * don't commit any register state until the commit call
7768 * succeeds.
7769 * @param pbUnmapInfo Where to store unmap info for
7770 * iemMemStackPushCommitSpecial.
7771 * @param puNewRsp Where to return the new RSP value. This must be
7772 * passed unchanged to
7773 * iemMemStackPushCommitSpecial().
7774 */
7775VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7776 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7777{
7778 Assert(cbMem < UINT8_MAX);
7779 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7780 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7781}
7782
7783
7784/**
7785 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7786 *
7787 * This will update the rSP.
7788 *
7789 * @returns Strict VBox status code.
7790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7791 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7792 * @param uNewRsp The new RSP value returned by
7793 * iemMemStackPushBeginSpecial().
7794 */
7795VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7796{
7797 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7798 if (rcStrict == VINF_SUCCESS)
7799 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7800 return rcStrict;
7801}
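
/*
 * Illustrative usage sketch (not compiled): the begin/commit pairing for a
 * special stack push, e.g. while building an exception frame.  RSP is only
 * updated by iemMemStackPushCommitSpecial() on success, so no guest register
 * is touched if the mapping or the commit fails.  The function name below
 * is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExamplePushTwoWords(PVMCPUCC pVCpu, uint16_t uWord0, uint16_t uWord1) /* hypothetical */
{
    void        *pvFrame;
    uint8_t      bUnmapInfo;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 4 /*cbMem*/, 3 /*cbAlign mask*/,
                                                        &pvFrame, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        ((uint16_t *)pvFrame)[0] = uWord0;      /* word at the lower address */
        ((uint16_t *)pvFrame)[1] = uWord1;
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); /* commits RSP on success */
    }
    return rcStrict;
}
#endif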
7802
7803
7804/**
7805 * Begin a special stack pop (used by iret, retf and such).
7806 *
7807 * This will raise \#SS or \#PF if appropriate.
7808 *
7809 * @returns Strict VBox status code.
7810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7811 * @param cbMem The number of bytes to pop from the stack.
7812 * @param cbAlign The alignment mask (7, 3, 1).
7813 * @param ppvMem Where to return the pointer to the stack memory.
7814 * @param pbUnmapInfo Where to store unmap info for
7815 * iemMemStackPopDoneSpecial.
7816 * @param puNewRsp Where to return the new RSP value. This must be
7817 * assigned to CPUMCTX::rsp manually some time
7818 * after iemMemStackPopDoneSpecial() has been
7819 * called.
7820 */
7821VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7822 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7823{
7824 Assert(cbMem < UINT8_MAX);
7825 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7826 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7827}
7828
7829
7830/**
7831 * Continue a special stack pop (used by iret and retf), for the purpose of
7832 * retrieving a new stack pointer.
7833 *
7834 * This will raise \#SS or \#PF if appropriate.
7835 *
7836 * @returns Strict VBox status code.
7837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7838 * @param off Offset from the top of the stack. This is zero
7839 * except in the retf case.
7840 * @param cbMem The number of bytes to pop from the stack.
7841 * @param ppvMem Where to return the pointer to the stack memory.
7842 * @param pbUnmapInfo Where to store unmap info for
7843 * iemMemStackPopDoneSpecial.
7844 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7845 * return this because all use of this function is
7846 * to retrieve a new value and anything we return
7847 * here would be discarded.)
7848 */
7849VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7850 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7851{
7852 Assert(cbMem < UINT8_MAX);
7853
7854 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7855 RTGCPTR GCPtrTop;
7856 if (IEM_IS_64BIT_CODE(pVCpu))
7857 GCPtrTop = uCurNewRsp;
7858 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7859 GCPtrTop = (uint32_t)uCurNewRsp;
7860 else
7861 GCPtrTop = (uint16_t)uCurNewRsp;
7862
7863 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7864 0 /* checked in iemMemStackPopBeginSpecial */);
7865}
7866
7867
7868/**
7869 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7870 * iemMemStackPopContinueSpecial).
7871 *
7872 * The caller will manually commit the rSP.
7873 *
7874 * @returns Strict VBox status code.
7875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7876 * @param bUnmapInfo Unmap information returned by
7877 * iemMemStackPopBeginSpecial() or
7878 * iemMemStackPopContinueSpecial().
7879 */
7880VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7881{
7882 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7883}
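
/*
 * Illustrative usage sketch (not compiled): the special stack pop sequence.
 * The value is read through the mapping, the mapping is released with
 * iemMemStackPopDoneSpecial(), and only then does the caller commit the new
 * RSP value returned by iemMemStackPopBeginSpecial().  The function name
 * below is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExamplePopU64(PVMCPUCC pVCpu, uint64_t *puValue) /* hypothetical */
{
    void const  *pvStack;
    uint8_t      bUnmapInfo;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8 /*cbMem*/, 7 /*cbAlign mask*/,
                                                       &pvStack, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *puValue = *(uint64_t const *)pvStack;
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
        if (rcStrict == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* the manual RSP commit mentioned in the docs above */
    }
    return rcStrict;
}
#endif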
7884
7885
7886/**
7887 * Fetches a system table byte.
7888 *
7889 * @returns Strict VBox status code.
7890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7891 * @param pbDst Where to return the byte.
7892 * @param iSegReg The index of the segment register to use for
7893 * this access. The base and limits are checked.
7894 * @param GCPtrMem The address of the guest memory.
7895 */
7896VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7897{
7898 /* The lazy approach for now... */
7899 uint8_t bUnmapInfo;
7900 uint8_t const *pbSrc;
7901 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7902 if (rc == VINF_SUCCESS)
7903 {
7904 *pbDst = *pbSrc;
7905 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7906 }
7907 return rc;
7908}
7909
7910
7911/**
7912 * Fetches a system table word.
7913 *
7914 * @returns Strict VBox status code.
7915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7916 * @param pu16Dst Where to return the word.
7917 * @param iSegReg The index of the segment register to use for
7918 * this access. The base and limits are checked.
7919 * @param GCPtrMem The address of the guest memory.
7920 */
7921VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7922{
7923 /* The lazy approach for now... */
7924 uint8_t bUnmapInfo;
7925 uint16_t const *pu16Src;
7926 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7927 if (rc == VINF_SUCCESS)
7928 {
7929 *pu16Dst = *pu16Src;
7930 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7931 }
7932 return rc;
7933}
7934
7935
7936/**
7937 * Fetches a system table dword.
7938 *
7939 * @returns Strict VBox status code.
7940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7941 * @param pu32Dst Where to return the dword.
7942 * @param iSegReg The index of the segment register to use for
7943 * this access. The base and limits are checked.
7944 * @param GCPtrMem The address of the guest memory.
7945 */
7946VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7947{
7948 /* The lazy approach for now... */
7949 uint8_t bUnmapInfo;
7950 uint32_t const *pu32Src;
7951 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7952 if (rc == VINF_SUCCESS)
7953 {
7954 *pu32Dst = *pu32Src;
7955 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7956 }
7957 return rc;
7958}
7959
7960
7961/**
7962 * Fetches a system table qword.
7963 *
7964 * @returns Strict VBox status code.
7965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7966 * @param pu64Dst Where to return the qword.
7967 * @param iSegReg The index of the segment register to use for
7968 * this access. The base and limits are checked.
7969 * @param GCPtrMem The address of the guest memory.
7970 */
7971VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7972{
7973 /* The lazy approach for now... */
7974 uint8_t bUnmapInfo;
7975 uint64_t const *pu64Src;
7976 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7977 if (rc == VINF_SUCCESS)
7978 {
7979 *pu64Dst = *pu64Src;
7980 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7981 }
7982 return rc;
7983}
7984
7985
7986/**
7987 * Fetches a descriptor table entry with caller specified error code.
7988 *
7989 * @returns Strict VBox status code.
7990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7991 * @param pDesc Where to return the descriptor table entry.
7992 * @param uSel The selector which table entry to fetch.
7993 * @param uXcpt The exception to raise on table lookup error.
7994 * @param uErrorCode The error code associated with the exception.
7995 */
7996static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7997 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7998{
7999 AssertPtr(pDesc);
8000 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8001
8002 /** @todo did the 286 require all 8 bytes to be accessible? */
8003 /*
8004 * Get the selector table base and check bounds.
8005 */
8006 RTGCPTR GCPtrBase;
8007 if (uSel & X86_SEL_LDT)
8008 {
8009 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8010 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8011 {
8012 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8013 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8014 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8015 uErrorCode, 0);
8016 }
8017
8018 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8019 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8020 }
8021 else
8022 {
8023 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8024 {
8025 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8026 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8027 uErrorCode, 0);
8028 }
8029 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8030 }
8031
8032 /*
8033 * Read the legacy descriptor and maybe the long mode extensions if
8034 * required.
8035 */
8036 VBOXSTRICTRC rcStrict;
8037 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8038 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8039 else
8040 {
8041 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8042 if (rcStrict == VINF_SUCCESS)
8043 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8044 if (rcStrict == VINF_SUCCESS)
8045 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8046 if (rcStrict == VINF_SUCCESS)
8047 pDesc->Legacy.au16[3] = 0;
8048 else
8049 return rcStrict;
8050 }
8051
8052 if (rcStrict == VINF_SUCCESS)
8053 {
8054 if ( !IEM_IS_LONG_MODE(pVCpu)
8055 || pDesc->Legacy.Gen.u1DescType)
8056 pDesc->Long.au64[1] = 0;
8057 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8058 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8059 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8060 else
8061 {
8062 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8063 /** @todo is this the right exception? */
8064 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8065 }
8066 }
8067 return rcStrict;
8068}
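
/*
 * Note on the address used for the high descriptor half above (illustration
 * only):  X86_SEL_RPL_LDT covers the low three selector bits (RPL + TI), so
 * (uSel | X86_SEL_RPL_LDT) + 1 equals (uSel & X86_SEL_MASK) + 8, i.e. the
 * second 8 bytes of the 16 byte long mode system descriptor.
 */
#if 0
/* e.g. uSel = 0x002b:  (0x002b | 7) + 1 = 0x0030 = (0x002b & 0xfff8) + 8 */
AssertCompile(((0x002b | X86_SEL_RPL_LDT) + 1) == ((0x002b & X86_SEL_MASK) + 8));
#endif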
8069
8070
8071/**
8072 * Fetches a descriptor table entry.
8073 *
8074 * @returns Strict VBox status code.
8075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8076 * @param pDesc Where to return the descriptor table entry.
8077 * @param uSel The selector which table entry to fetch.
8078 * @param uXcpt The exception to raise on table lookup error.
8079 */
8080VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8081{
8082 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8083}
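
/*
 * Note (illustration only):  the default error code passed above is the
 * faulting selector with the RPL bits masked off, since the low two bits of
 * a selector error code are reused for the EXT and IDT flags.
 */
#if 0
AssertCompile((0x002b & X86_SEL_MASK_OFF_RPL) == 0x0028);
#endif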
8084
8085
8086/**
8087 * Marks the selector descriptor as accessed (only non-system descriptors).
8088 *
8089 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8090 * will therefore skip the limit checks.
8091 *
8092 * @returns Strict VBox status code.
8093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8094 * @param uSel The selector.
8095 */
8096VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8097{
8098 /*
8099 * Get the selector table base and calculate the entry address.
8100 */
8101 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8102 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8103 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8104 GCPtr += uSel & X86_SEL_MASK;
8105
8106 /*
8107 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8108 * ugly stuff to avoid this. This will make sure it's an atomic access
8109 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8110 */
8111 VBOXSTRICTRC rcStrict;
8112 uint8_t bUnmapInfo;
8113 uint32_t volatile *pu32;
8114 if ((GCPtr & 3) == 0)
8115 {
8116 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8117 GCPtr += 2 + 2;
8118 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8119 if (rcStrict != VINF_SUCCESS)
8120 return rcStrict;
8121 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8122 }
8123 else
8124 {
8125 /* The misaligned GDT/LDT case, map the whole thing. */
8126 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8127 if (rcStrict != VINF_SUCCESS)
8128 return rcStrict;
8129 switch ((uintptr_t)pu32 & 3)
8130 {
8131 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8132 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8133 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8134 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8135 }
8136 }
8137
8138 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8139}
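
/*
 * Note on the bit numbers above (illustration only):  X86_SEL_TYPE_ACCESSED
 * is bit 0 of the type byte at descriptor offset 5, i.e. bit 40 of the 8
 * byte descriptor.  Mapping the dword at offset 4 therefore places it at
 * bit 8, and the 40 - N*8 arithmetic in the misaligned case rebases the bit
 * index onto whichever byte pointer happens to be 32-bit aligned.
 */
#if 0
AssertCompile(40 - 4 * 8 == 8);     /* aligned case: dword mapped at descriptor offset 4 */
#endif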
8140
8141
8142#undef LOG_GROUP
8143#define LOG_GROUP LOG_GROUP_IEM
8144
8145/** @} */
8146
8147/** @name Opcode Helpers.
8148 * @{
8149 */
8150
8151/**
8152 * Calculates the effective address of a ModR/M memory operand.
8153 *
8154 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8155 *
8156 * @return Strict VBox status code.
8157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8158 * @param bRm The ModRM byte.
8159 * @param cbImmAndRspOffset - First byte: The size of any immediate
8160 * following the effective address opcode bytes
8161 * (only for RIP relative addressing).
8162 * - Second byte: RSP displacement (for POP [ESP]).
8163 * @param pGCPtrEff Where to return the effective address.
8164 */
8165VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8166{
8167 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8168# define SET_SS_DEF() \
8169 do \
8170 { \
8171 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8172 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8173 } while (0)
8174
8175 if (!IEM_IS_64BIT_CODE(pVCpu))
8176 {
8177/** @todo Check the effective address size crap! */
8178 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8179 {
8180 uint16_t u16EffAddr;
8181
8182 /* Handle the disp16 form with no registers first. */
8183 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8184 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8185 else
8186 {
8187 /* Get the displacement. */
8188 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8189 {
8190 case 0: u16EffAddr = 0; break;
8191 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8192 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8193 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8194 }
8195
8196 /* Add the base and index registers to the disp. */
8197 switch (bRm & X86_MODRM_RM_MASK)
8198 {
8199 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8200 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8201 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8202 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8203 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8204 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8205 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8206 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8207 }
8208 }
8209
8210 *pGCPtrEff = u16EffAddr;
8211 }
8212 else
8213 {
8214 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8215 uint32_t u32EffAddr;
8216
8217 /* Handle the disp32 form with no registers first. */
8218 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8219 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8220 else
8221 {
8222 /* Get the register (or SIB) value. */
8223 switch ((bRm & X86_MODRM_RM_MASK))
8224 {
8225 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8226 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8227 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8228 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8229 case 4: /* SIB */
8230 {
8231 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8232
8233 /* Get the index and scale it. */
8234 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8235 {
8236 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8237 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8238 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8239 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8240 case 4: u32EffAddr = 0; /*none */ break;
8241 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8242 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8243 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8244 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8245 }
8246 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8247
8248 /* add base */
8249 switch (bSib & X86_SIB_BASE_MASK)
8250 {
8251 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8252 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8253 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8254 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8255 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8256 case 5:
8257 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8258 {
8259 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8260 SET_SS_DEF();
8261 }
8262 else
8263 {
8264 uint32_t u32Disp;
8265 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8266 u32EffAddr += u32Disp;
8267 }
8268 break;
8269 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8270 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8271 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8272 }
8273 break;
8274 }
8275 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8276 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8277 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8278 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8279 }
8280
8281 /* Get and add the displacement. */
8282 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8283 {
8284 case 0:
8285 break;
8286 case 1:
8287 {
8288 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8289 u32EffAddr += i8Disp;
8290 break;
8291 }
8292 case 2:
8293 {
8294 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8295 u32EffAddr += u32Disp;
8296 break;
8297 }
8298 default:
8299 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8300 }
8301
8302 }
8303 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8304 *pGCPtrEff = u32EffAddr;
8305 }
8306 }
8307 else
8308 {
8309 uint64_t u64EffAddr;
8310
8311 /* Handle the rip+disp32 form with no registers first. */
8312 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8313 {
8314 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8315 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8316 }
8317 else
8318 {
8319 /* Get the register (or SIB) value. */
8320 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8321 {
8322 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8323 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8324 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8325 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8326 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8327 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8328 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8329 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8330 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8331 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8332 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8333 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8334 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8335 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8336 /* SIB */
8337 case 4:
8338 case 12:
8339 {
8340 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8341
8342 /* Get the index and scale it. */
8343 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8344 {
8345 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8346 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8347 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8348 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8349 case 4: u64EffAddr = 0; /*none */ break;
8350 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8351 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8352 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8353 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8354 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8355 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8356 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8357 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8358 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8359 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8360 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8361 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8362 }
8363 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8364
8365 /* add base */
8366 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8367 {
8368 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8369 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8370 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8371 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8372 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8373 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8374 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8375 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8376 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8377 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8378 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8379 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8380 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8381 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8382 /* complicated encodings */
8383 case 5:
8384 case 13:
8385 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8386 {
8387 if (!pVCpu->iem.s.uRexB)
8388 {
8389 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8390 SET_SS_DEF();
8391 }
8392 else
8393 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8394 }
8395 else
8396 {
8397 uint32_t u32Disp;
8398 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8399 u64EffAddr += (int32_t)u32Disp;
8400 }
8401 break;
8402 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8403 }
8404 break;
8405 }
8406 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8407 }
8408
8409 /* Get and add the displacement. */
8410 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8411 {
8412 case 0:
8413 break;
8414 case 1:
8415 {
8416 int8_t i8Disp;
8417 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8418 u64EffAddr += i8Disp;
8419 break;
8420 }
8421 case 2:
8422 {
8423 uint32_t u32Disp;
8424 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8425 u64EffAddr += (int32_t)u32Disp;
8426 break;
8427 }
8428 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8429 }
8430
8431 }
8432
8433 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8434 *pGCPtrEff = u64EffAddr;
8435 else
8436 {
8437 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8438 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8439 }
8440 }
8441
8442 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8443 return VINF_SUCCESS;
8444}
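
/*
 * Worked example for the 16-bit path above (illustration only):  bRm=0x46
 * is mod=01 rm=110, i.e. [bp + disp8].  The sign extended disp8 is fetched,
 * BP is added, and SET_SS_DEF() makes SS the default segment unless an
 * explicit segment prefix overrides it.
 */
#if 0
/* mod = (0x46 >> 6) & 3 = 1  ->  an 8-bit displacement follows the ModRM byte */
/* rm  =  0x46       & 7 = 6  ->  base register BP, stack segment is the default */
#endif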
8445
8446
8447#ifdef IEM_WITH_SETJMP
8448/**
8449 * Calculates the effective address of a ModR/M memory operand.
8450 *
8451 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8452 *
8453 * May longjmp on internal error.
8454 *
8455 * @return The effective address.
8456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8457 * @param bRm The ModRM byte.
8458 * @param cbImmAndRspOffset - First byte: The size of any immediate
8459 * following the effective address opcode bytes
8460 * (only for RIP relative addressing).
8461 * - Second byte: RSP displacement (for POP [ESP]).
8462 */
8463RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8464{
8465 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8466# define SET_SS_DEF() \
8467 do \
8468 { \
8469 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8470 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8471 } while (0)
8472
8473 if (!IEM_IS_64BIT_CODE(pVCpu))
8474 {
8475/** @todo Check the effective address size crap! */
8476 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8477 {
8478 uint16_t u16EffAddr;
8479
8480 /* Handle the disp16 form with no registers first. */
8481 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8482 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8483 else
8484 {
8485 /* Get the displacement. */
8486 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8487 {
8488 case 0: u16EffAddr = 0; break;
8489 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8490 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8491 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8492 }
8493
8494 /* Add the base and index registers to the disp. */
8495 switch (bRm & X86_MODRM_RM_MASK)
8496 {
8497 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8498 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8499 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8500 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8501 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8502 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8503 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8504 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8505 }
8506 }
8507
8508 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8509 return u16EffAddr;
8510 }
8511
8512 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8513 uint32_t u32EffAddr;
8514
8515 /* Handle the disp32 form with no registers first. */
8516 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8517 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8518 else
8519 {
8520 /* Get the register (or SIB) value. */
8521 switch ((bRm & X86_MODRM_RM_MASK))
8522 {
8523 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8524 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8525 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8526 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8527 case 4: /* SIB */
8528 {
8529 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8530
8531 /* Get the index and scale it. */
8532 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8533 {
8534 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8535 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8536 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8537 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8538 case 4: u32EffAddr = 0; /*none */ break;
8539 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8540 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8541 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8542 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8543 }
8544 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8545
8546 /* add base */
8547 switch (bSib & X86_SIB_BASE_MASK)
8548 {
8549 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8550 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8551 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8552 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8553 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8554 case 5:
8555 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8556 {
8557 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8558 SET_SS_DEF();
8559 }
8560 else
8561 {
8562 uint32_t u32Disp;
8563 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8564 u32EffAddr += u32Disp;
8565 }
8566 break;
8567 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8568 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8569 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8570 }
8571 break;
8572 }
8573 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8574 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8575 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8576 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8577 }
8578
8579 /* Get and add the displacement. */
8580 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8581 {
8582 case 0:
8583 break;
8584 case 1:
8585 {
8586 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8587 u32EffAddr += i8Disp;
8588 break;
8589 }
8590 case 2:
8591 {
8592 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8593 u32EffAddr += u32Disp;
8594 break;
8595 }
8596 default:
8597 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8598 }
8599 }
8600
8601 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8602 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8603 return u32EffAddr;
8604 }
8605
8606 uint64_t u64EffAddr;
8607
8608 /* Handle the rip+disp32 form with no registers first. */
8609 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8610 {
8611 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8612 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8613 }
8614 else
8615 {
8616 /* Get the register (or SIB) value. */
8617 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8618 {
8619 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8620 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8621 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8622 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8623 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8624 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8625 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8626 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8627 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8628 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8629 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8630 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8631 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8632 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8633 /* SIB */
8634 case 4:
8635 case 12:
8636 {
8637 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8638
8639 /* Get the index and scale it. */
8640 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8641 {
8642 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8643 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8644 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8645 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8646 case 4: u64EffAddr = 0; /*none */ break;
8647 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8648 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8649 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8650 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8651 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8652 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8653 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8654 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8655 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8656 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8657 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8658 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8659 }
8660 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8661
8662 /* add base */
8663 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8664 {
8665 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8666 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8667 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8668 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8669 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8670 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8671 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8672 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8673 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8674 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8675 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8676 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8677 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8678 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8679 /* complicated encodings */
8680 case 5:
8681 case 13:
8682 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8683 {
8684 if (!pVCpu->iem.s.uRexB)
8685 {
8686 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8687 SET_SS_DEF();
8688 }
8689 else
8690 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8691 }
8692 else
8693 {
8694 uint32_t u32Disp;
8695 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8696 u64EffAddr += (int32_t)u32Disp;
8697 }
8698 break;
8699 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8700 }
8701 break;
8702 }
8703 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8704 }
8705
8706 /* Get and add the displacement. */
8707 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8708 {
8709 case 0:
8710 break;
8711 case 1:
8712 {
8713 int8_t i8Disp;
8714 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8715 u64EffAddr += i8Disp;
8716 break;
8717 }
8718 case 2:
8719 {
8720 uint32_t u32Disp;
8721 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8722 u64EffAddr += (int32_t)u32Disp;
8723 break;
8724 }
8725 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8726 }
8727
8728 }
8729
8730 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8731 {
8732 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8733 return u64EffAddr;
8734 }
8735 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8736 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8737 return u64EffAddr & UINT32_MAX;
8738}
8739#endif /* IEM_WITH_SETJMP */
8740
8741
8742/**
8743 * Calculates the effective address of a ModR/M memory operand, extended version
8744 * for use in the recompilers.
8745 *
8746 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8747 *
8748 * @return Strict VBox status code.
8749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8750 * @param bRm The ModRM byte.
8751 * @param cbImmAndRspOffset - First byte: The size of any immediate
8752 * following the effective address opcode bytes
8753 * (only for RIP relative addressing).
8754 * - Second byte: RSP displacement (for POP [ESP]).
8755 * @param pGCPtrEff Where to return the effective address.
8756 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8757 * SIB byte (bits 39:32).
8758 */
8759VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8760{
8761 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8762# define SET_SS_DEF() \
8763 do \
8764 { \
8765 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8766 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8767 } while (0)
8768
8769 uint64_t uInfo;
8770 if (!IEM_IS_64BIT_CODE(pVCpu))
8771 {
8772/** @todo Check the effective address size crap! */
8773 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8774 {
8775 uint16_t u16EffAddr;
8776
8777 /* Handle the disp16 form with no registers first. */
8778 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8779 {
8780 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8781 uInfo = u16EffAddr;
8782 }
8783 else
8784 {
8785 /* Get the displacement. */
8786 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8787 {
8788 case 0: u16EffAddr = 0; break;
8789 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8790 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8791 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8792 }
8793 uInfo = u16EffAddr;
8794
8795 /* Add the base and index registers to the disp. */
8796 switch (bRm & X86_MODRM_RM_MASK)
8797 {
8798 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8799 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8800 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8801 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8802 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8803 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8804 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8805 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8806 }
8807 }
8808
8809 *pGCPtrEff = u16EffAddr;
8810 }
8811 else
8812 {
8813 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8814 uint32_t u32EffAddr;
8815
8816 /* Handle the disp32 form with no registers first. */
8817 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8818 {
8819 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8820 uInfo = u32EffAddr;
8821 }
8822 else
8823 {
8824 /* Get the register (or SIB) value. */
8825 uInfo = 0;
8826 switch ((bRm & X86_MODRM_RM_MASK))
8827 {
8828 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8829 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8830 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8831 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8832 case 4: /* SIB */
8833 {
8834 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8835 uInfo = (uint64_t)bSib << 32;
8836
8837 /* Get the index and scale it. */
8838 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8839 {
8840 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8841 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8842 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8843 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8844 case 4: u32EffAddr = 0; /*none */ break;
8845 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8846 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8847 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8848 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8849 }
8850 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8851
8852 /* add base */
8853 switch (bSib & X86_SIB_BASE_MASK)
8854 {
8855 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8856 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8857 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8858 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8859 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8860 case 5:
8861 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8862 {
8863 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8864 SET_SS_DEF();
8865 }
8866 else
8867 {
8868 uint32_t u32Disp;
8869 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8870 u32EffAddr += u32Disp;
8871 uInfo |= u32Disp;
8872 }
8873 break;
8874 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8875 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8876 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8877 }
8878 break;
8879 }
8880 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8881 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8882 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8883 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8884 }
8885
8886 /* Get and add the displacement. */
8887 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8888 {
8889 case 0:
8890 break;
8891 case 1:
8892 {
8893 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8894 u32EffAddr += i8Disp;
8895 uInfo |= (uint32_t)(int32_t)i8Disp;
8896 break;
8897 }
8898 case 2:
8899 {
8900 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8901 u32EffAddr += u32Disp;
8902 uInfo |= (uint32_t)u32Disp;
8903 break;
8904 }
8905 default:
8906 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8907 }
8908
8909 }
8910 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8911 *pGCPtrEff = u32EffAddr;
8912 }
8913 }
8914 else
8915 {
8916 uint64_t u64EffAddr;
8917
8918 /* Handle the rip+disp32 form with no registers first. */
8919 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8920 {
8921 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8922 uInfo = (uint32_t)u64EffAddr;
8923 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8924 }
8925 else
8926 {
8927 /* Get the register (or SIB) value. */
8928 uInfo = 0;
8929 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8930 {
8931 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8932 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8933 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8934 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8935 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8936 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8937 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8938 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8939 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8940 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8941 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8942 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8943 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8944 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8945 /* SIB */
8946 case 4:
8947 case 12:
8948 {
8949 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8950 uInfo = (uint64_t)bSib << 32;
8951
8952 /* Get the index and scale it. */
8953 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8954 {
8955 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8956 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8957 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8958 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8959 case 4: u64EffAddr = 0; /*none */ break;
8960 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8961 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8962 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8963 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8964 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8965 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8966 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8967 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8968 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8969 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8970 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8971 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8972 }
8973 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8974
8975 /* add base */
8976 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8977 {
8978 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8979 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8980 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8981 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8982 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8983 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8984 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8985 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8986 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8987 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8988 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8989 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8990 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8991 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8992 /* complicated encodings */
8993 case 5:
8994 case 13:
8995 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8996 {
8997 if (!pVCpu->iem.s.uRexB)
8998 {
8999 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9000 SET_SS_DEF();
9001 }
9002 else
9003 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9004 }
9005 else
9006 {
9007 uint32_t u32Disp;
9008 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9009 u64EffAddr += (int32_t)u32Disp;
9010 uInfo |= u32Disp;
9011 }
9012 break;
9013 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9014 }
9015 break;
9016 }
9017 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9018 }
9019
9020 /* Get and add the displacement. */
9021 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9022 {
9023 case 0:
9024 break;
9025 case 1:
9026 {
9027 int8_t i8Disp;
9028 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9029 u64EffAddr += i8Disp;
9030 uInfo |= (uint32_t)(int32_t)i8Disp;
9031 break;
9032 }
9033 case 2:
9034 {
9035 uint32_t u32Disp;
9036 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9037 u64EffAddr += (int32_t)u32Disp;
9038 uInfo |= u32Disp;
9039 break;
9040 }
9041 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9042 }
9043
9044 }
9045
9046 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9047 *pGCPtrEff = u64EffAddr;
9048 else
9049 {
9050 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9051 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9052 }
9053 }
9054 *puInfo = uInfo;
9055
9056 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9057 return VINF_SUCCESS;
9058}
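/*
 * Illustrative sketch, not part of the interpreter: shows how a caller could
 * take apart the two packed outputs of iemOpHlpCalcRmEffAddrEx().  The example
 * function name and the '4' immediate size are made up; the bit layout is the
 * one documented above (displacement in bits 31:0, SIB byte in bits 39:32).
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleCalcAndUnpackEffAddr(PVMCPUCC pVCpu, uint8_t bRm)
{
    RTGCPTR  GCPtrEff = 0;
    uint64_t uInfo    = 0;
    /* First byte of cbImmAndRspOffset: 4 immediate bytes follow (RIP relative only); second byte: no RSP offset. */
    VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 4, &GCPtrEff, &uInfo);
    if (rcStrict == VINF_SUCCESS)
    {
        uint32_t const u32Disp = (uint32_t)uInfo;        /* bits 31:0  - the displacement         */
        uint8_t  const bSib    = (uint8_t)(uInfo >> 32); /* bits 39:32 - the SIB byte, if present */
        Log5(("example: GCPtrEff=%RGv disp=%#x sib=%#x\n", GCPtrEff, u32Disp, bSib));
    }
    return rcStrict;
}
#endif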
9059
9060/** @} */
9061
9062
9063#ifdef LOG_ENABLED
9064/**
9065 * Logs the current instruction.
9066 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9067 * @param fSameCtx Set if we have the same context information as the VMM,
9068 * clear if we may have already executed an instruction in
9069 * our debug context. When clear, we assume IEMCPU holds
9070 * valid CPU mode info.
9071 *
9072 * The @a fSameCtx parameter is now misleading and obsolete.
9073 * @param pszFunction The IEM function doing the execution.
9074 */
9075static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9076{
9077# ifdef IN_RING3
9078 if (LogIs2Enabled())
9079 {
9080 char szInstr[256];
9081 uint32_t cbInstr = 0;
9082 if (fSameCtx)
9083 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9084 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9085 szInstr, sizeof(szInstr), &cbInstr);
9086 else
9087 {
9088 uint32_t fFlags = 0;
9089 switch (IEM_GET_CPU_MODE(pVCpu))
9090 {
9091 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9092 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9093 case IEMMODE_16BIT:
9094 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9095 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9096 else
9097 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9098 break;
9099 }
9100 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9101 szInstr, sizeof(szInstr), &cbInstr);
9102 }
9103
9104 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9105 Log2(("**** %s fExec=%x\n"
9106 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9107 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9108 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9109 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9110 " %s\n"
9111 , pszFunction, pVCpu->iem.s.fExec,
9112 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9113 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9114 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9115 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9116 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9117 szInstr));
9118
9119 /* This stuff sucks atm. as it fills the log with MSRs. */
9120 //if (LogIs3Enabled())
9121 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9122 }
9123 else
9124# endif
9125 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9126 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9127 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9128}
9129#endif /* LOG_ENABLED */
9130
9131
9132#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9133/**
9134 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9135 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9136 *
9137 * @returns Modified rcStrict.
9138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9139 * @param rcStrict The instruction execution status.
9140 */
9141static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9142{
9143 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9144 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9145 {
9146 /* VMX preemption timer takes priority over NMI-window exits. */
9147 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9148 {
9149 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9150 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9151 }
9152 /*
9153 * Check remaining intercepts.
9154 *
9155 * NMI-window and Interrupt-window VM-exits.
9156 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9157 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9158 *
9159 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9160 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9161 */
9162 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9163 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9164 && !TRPMHasTrap(pVCpu))
9165 {
9166 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9167 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9168 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9169 {
9170 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9171 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9172 }
9173 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9174 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9175 {
9176 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9177 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9178 }
9179 }
9180 }
9181 /* TPR-below threshold/APIC write has the highest priority. */
9182 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9183 {
9184 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9185 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9186 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9187 }
9188 /* MTF takes priority over VMX-preemption timer. */
9189 else
9190 {
9191 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9192 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9193 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9194 }
9195 return rcStrict;
9196}
9197#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9198
9199
9200/**
9201 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9202 * IEMExecOneWithPrefetchedByPC.
9203 *
9204 * Similar code is found in IEMExecLots.
9205 *
9206 * @return Strict VBox status code.
9207 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9208 * @param fExecuteInhibit If set, execute the instruction following CLI,
9209 * POP SS and MOV SS,GR.
9210 * @param pszFunction The calling function name.
9211 */
9212DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9213{
9214 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9215 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9216 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9217 RT_NOREF_PV(pszFunction);
9218
9219#ifdef IEM_WITH_SETJMP
9220 VBOXSTRICTRC rcStrict;
9221 IEM_TRY_SETJMP(pVCpu, rcStrict)
9222 {
9223 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9224 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9225 }
9226 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9227 {
9228 pVCpu->iem.s.cLongJumps++;
9229 }
9230 IEM_CATCH_LONGJMP_END(pVCpu);
9231#else
9232 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9233 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9234#endif
9235 if (rcStrict == VINF_SUCCESS)
9236 pVCpu->iem.s.cInstructions++;
9237 if (pVCpu->iem.s.cActiveMappings > 0)
9238 {
9239 Assert(rcStrict != VINF_SUCCESS);
9240 iemMemRollback(pVCpu);
9241 }
9242 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9243 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9244 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9245
9246//#ifdef DEBUG
9247// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9248//#endif
9249
9250#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9251 /*
9252 * Perform any VMX nested-guest instruction boundary actions.
9253 *
9254 * If any of these causes a VM-exit, we must skip executing the next
9255 * instruction (would run into stale page tables). A VM-exit makes sure
9256     * there is no interrupt-inhibition, so that should ensure we don't go on
9257     * to try executing the next instruction. Clearing fExecuteInhibit is
9258 * problematic because of the setjmp/longjmp clobbering above.
9259 */
9260 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9261 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9262 || rcStrict != VINF_SUCCESS)
9263 { /* likely */ }
9264 else
9265 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9266#endif
9267
9268 /* Execute the next instruction as well if a cli, pop ss or
9269 mov ss, Gr has just completed successfully. */
9270 if ( fExecuteInhibit
9271 && rcStrict == VINF_SUCCESS
9272 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9273 {
9274 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9275 if (rcStrict == VINF_SUCCESS)
9276 {
9277#ifdef LOG_ENABLED
9278 iemLogCurInstr(pVCpu, false, pszFunction);
9279#endif
9280#ifdef IEM_WITH_SETJMP
9281 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9282 {
9283 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9284 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9285 }
9286 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9287 {
9288 pVCpu->iem.s.cLongJumps++;
9289 }
9290 IEM_CATCH_LONGJMP_END(pVCpu);
9291#else
9292 IEM_OPCODE_GET_FIRST_U8(&b);
9293 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9294#endif
9295 if (rcStrict == VINF_SUCCESS)
9296 {
9297 pVCpu->iem.s.cInstructions++;
9298#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9299 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9300 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9301 { /* likely */ }
9302 else
9303 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9304#endif
9305 }
9306 if (pVCpu->iem.s.cActiveMappings > 0)
9307 {
9308 Assert(rcStrict != VINF_SUCCESS);
9309 iemMemRollback(pVCpu);
9310 }
9311 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9312 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9313 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9314 }
9315 else if (pVCpu->iem.s.cActiveMappings > 0)
9316 iemMemRollback(pVCpu);
9317 /** @todo drop this after we bake this change into RIP advancing. */
9318 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9319 }
9320
9321 /*
9322 * Return value fiddling, statistics and sanity assertions.
9323 */
9324 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9325
9326 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9327 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9328 return rcStrict;
9329}
9330
9331
9332/**
9333 * Execute one instruction.
9334 *
9335 * @return Strict VBox status code.
9336 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9337 */
9338VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9339{
9340    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9341#ifdef LOG_ENABLED
9342 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9343#endif
9344
9345 /*
9346 * Do the decoding and emulation.
9347 */
9348 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9349 if (rcStrict == VINF_SUCCESS)
9350 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9351 else if (pVCpu->iem.s.cActiveMappings > 0)
9352 iemMemRollback(pVCpu);
9353
9354 if (rcStrict != VINF_SUCCESS)
9355 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9356 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9357 return rcStrict;
9358}
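/*
 * Illustrative sketch, not part of the sources: a minimal caller of
 * IEMExecOne().  The stepping loop and the way rcStrict is handled here are
 * hypothetical; only the IEMExecOne() signature above is taken as given.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleSingleStep(PVMCPUCC pVCpu, uint32_t cSteps)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cSteps-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);   /* decodes, executes and advances RIP */
        if (rcStrict != VINF_SUCCESS)   /* informational statuses and errors are left to the caller */
            break;
    }
    return rcStrict;
}
#endif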
9359
9360
9361VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9362{
9363 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9364 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9365 if (rcStrict == VINF_SUCCESS)
9366 {
9367 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9368 if (pcbWritten)
9369 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9370 }
9371 else if (pVCpu->iem.s.cActiveMappings > 0)
9372 iemMemRollback(pVCpu);
9373
9374 return rcStrict;
9375}
9376
9377
9378VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9379 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9380{
9381 VBOXSTRICTRC rcStrict;
9382 if ( cbOpcodeBytes
9383 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9384 {
9385 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9386#ifdef IEM_WITH_CODE_TLB
9387 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9388 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9389 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9390 pVCpu->iem.s.offCurInstrStart = 0;
9391 pVCpu->iem.s.offInstrNextByte = 0;
9392 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9393#else
9394 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9395 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9396#endif
9397 rcStrict = VINF_SUCCESS;
9398 }
9399 else
9400 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9401 if (rcStrict == VINF_SUCCESS)
9402 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9403 else if (pVCpu->iem.s.cActiveMappings > 0)
9404 iemMemRollback(pVCpu);
9405
9406 return rcStrict;
9407}
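/*
 * Illustrative sketch, not part of the sources: feeding already fetched opcode
 * bytes to IEMExecOneWithPrefetchedByPC().  The single NOP byte is made up; as
 * the code above shows, the bytes are only used when OpcodeBytesPC matches the
 * current RIP, otherwise a normal opcode prefetch is done.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleExecPrefetched(PVMCPUCC pVCpu)
{
    static uint8_t const s_abOpcode[] = { 0x90 }; /* nop */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip, s_abOpcode, sizeof(s_abOpcode));
}
#endif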
9408
9409
9410VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9411{
9412 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9413 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9414 if (rcStrict == VINF_SUCCESS)
9415 {
9416 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9417 if (pcbWritten)
9418 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9419 }
9420 else if (pVCpu->iem.s.cActiveMappings > 0)
9421 iemMemRollback(pVCpu);
9422
9423 return rcStrict;
9424}
9425
9426
9427VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9428 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9429{
9430 VBOXSTRICTRC rcStrict;
9431 if ( cbOpcodeBytes
9432 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9433 {
9434 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9435#ifdef IEM_WITH_CODE_TLB
9436 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9437 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9438 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9439 pVCpu->iem.s.offCurInstrStart = 0;
9440 pVCpu->iem.s.offInstrNextByte = 0;
9441 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9442#else
9443 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9444 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9445#endif
9446 rcStrict = VINF_SUCCESS;
9447 }
9448 else
9449 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9450 if (rcStrict == VINF_SUCCESS)
9451 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9452 else if (pVCpu->iem.s.cActiveMappings > 0)
9453 iemMemRollback(pVCpu);
9454
9455 return rcStrict;
9456}
9457
9458
9459/**
9460 * For handling split cacheline lock operations when the host has split-lock
9461 * detection enabled.
9462 *
9463 * This will cause the interpreter to disregard the lock prefix and implicit
9464 * locking (xchg).
9465 *
9466 * @returns Strict VBox status code.
9467 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9468 */
9469VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9470{
9471 /*
9472 * Do the decoding and emulation.
9473 */
9474 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9475 if (rcStrict == VINF_SUCCESS)
9476 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9477 else if (pVCpu->iem.s.cActiveMappings > 0)
9478 iemMemRollback(pVCpu);
9479
9480 if (rcStrict != VINF_SUCCESS)
9481 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9482 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9483 return rcStrict;
9484}
9485
9486
9487/**
9488 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9489 * inject a pending TRPM trap.
9490 */
9491VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9492{
9493 Assert(TRPMHasTrap(pVCpu));
9494
9495 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9496 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9497 {
9498 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9499#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9500 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9501 if (fIntrEnabled)
9502 {
9503 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9504 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9505 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9506 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9507 else
9508 {
9509 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9510 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9511 }
9512 }
9513#else
9514 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9515#endif
9516 if (fIntrEnabled)
9517 {
9518 uint8_t u8TrapNo;
9519 TRPMEVENT enmType;
9520 uint32_t uErrCode;
9521 RTGCPTR uCr2;
9522 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9523 AssertRC(rc2);
9524 Assert(enmType == TRPM_HARDWARE_INT);
9525 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9526
9527 TRPMResetTrap(pVCpu);
9528
9529#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9530 /* Injecting an event may cause a VM-exit. */
9531 if ( rcStrict != VINF_SUCCESS
9532 && rcStrict != VINF_IEM_RAISED_XCPT)
9533 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9534#else
9535 NOREF(rcStrict);
9536#endif
9537 }
9538 }
9539
9540 return VINF_SUCCESS;
9541}
9542
9543
9544VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9545{
9546 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9547 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9548 Assert(cMaxInstructions > 0);
9549
9550 /*
9551 * See if there is an interrupt pending in TRPM, inject it if we can.
9552 */
9553 /** @todo What if we are injecting an exception and not an interrupt? Is that
9554 * possible here? For now we assert it is indeed only an interrupt. */
9555 if (!TRPMHasTrap(pVCpu))
9556 { /* likely */ }
9557 else
9558 {
9559 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9560 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9561 { /*likely */ }
9562 else
9563 return rcStrict;
9564 }
9565
9566 /*
9567 * Initial decoder init w/ prefetch, then setup setjmp.
9568 */
9569 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9570 if (rcStrict == VINF_SUCCESS)
9571 {
9572#ifdef IEM_WITH_SETJMP
9573 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9574 IEM_TRY_SETJMP(pVCpu, rcStrict)
9575#endif
9576 {
9577 /*
9578             * The run loop. We limit ourselves to the caller-specified cMaxInstructions.
9579 */
9580 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9581 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9582 for (;;)
9583 {
9584 /*
9585 * Log the state.
9586 */
9587#ifdef LOG_ENABLED
9588 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9589#endif
9590
9591 /*
9592 * Do the decoding and emulation.
9593 */
9594 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9595 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9596#ifdef VBOX_STRICT
9597 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9598#endif
9599 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9600 {
9601 Assert(pVCpu->iem.s.cActiveMappings == 0);
9602 pVCpu->iem.s.cInstructions++;
9603
9604#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9605 /* Perform any VMX nested-guest instruction boundary actions. */
9606 uint64_t fCpu = pVCpu->fLocalForcedActions;
9607 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9608 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9609 { /* likely */ }
9610 else
9611 {
9612 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9613 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9614 fCpu = pVCpu->fLocalForcedActions;
9615 else
9616 {
9617 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9618 break;
9619 }
9620 }
9621#endif
9622 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9623 {
9624#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9625 uint64_t fCpu = pVCpu->fLocalForcedActions;
9626#endif
9627 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9628 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9629 | VMCPU_FF_TLB_FLUSH
9630 | VMCPU_FF_UNHALT );
9631
9632 if (RT_LIKELY( ( !fCpu
9633 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9634 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9635 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9636 {
9637 if (--cMaxInstructionsGccStupidity > 0)
9638 {
9639                                /* Poll timers every now and then according to the caller's specs. */
9640 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9641 || !TMTimerPollBool(pVM, pVCpu))
9642 {
9643 Assert(pVCpu->iem.s.cActiveMappings == 0);
9644 iemReInitDecoder(pVCpu);
9645 continue;
9646 }
9647 }
9648 }
9649 }
9650 Assert(pVCpu->iem.s.cActiveMappings == 0);
9651 }
9652 else if (pVCpu->iem.s.cActiveMappings > 0)
9653 iemMemRollback(pVCpu);
9654 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9655 break;
9656 }
9657 }
9658#ifdef IEM_WITH_SETJMP
9659 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9660 {
9661 if (pVCpu->iem.s.cActiveMappings > 0)
9662 iemMemRollback(pVCpu);
9663# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9664 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9665# endif
9666 pVCpu->iem.s.cLongJumps++;
9667 }
9668 IEM_CATCH_LONGJMP_END(pVCpu);
9669#endif
9670
9671 /*
9672 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9673 */
9674 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9675 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9676 }
9677 else
9678 {
9679 if (pVCpu->iem.s.cActiveMappings > 0)
9680 iemMemRollback(pVCpu);
9681
9682#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9683 /*
9684 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9685 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9686 */
9687 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9688#endif
9689 }
9690
9691 /*
9692 * Maybe re-enter raw-mode and log.
9693 */
9694 if (rcStrict != VINF_SUCCESS)
9695 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9696 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9697 if (pcInstructions)
9698 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9699 return rcStrict;
9700}
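/*
 * Illustrative sketch, not part of the sources: calling IEMExecLots() the way
 * the assertion at the top expects, i.e. with cPollRate + 1 being a power of
 * two so it can serve as a mask for the timer polling check.  The concrete
 * numbers are arbitrary.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleExecLots(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate = 2^9 - 1*/, &cInstructions);
    LogFlow(("example: executed %u instructions -> %Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif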
9701
9702
9703/**
9704 * Interface used by EMExecuteExec, does exit statistics and limits.
9705 *
9706 * @returns Strict VBox status code.
9707 * @param pVCpu The cross context virtual CPU structure.
9708 * @param fWillExit To be defined.
9709 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9710 * @param cMaxInstructions Maximum number of instructions to execute.
9711 * @param cMaxInstructionsWithoutExits
9712 * The max number of instructions without exits.
9713 * @param pStats Where to return statistics.
9714 */
9715VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9716 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9717{
9718 NOREF(fWillExit); /** @todo define flexible exit crits */
9719
9720 /*
9721 * Initialize return stats.
9722 */
9723 pStats->cInstructions = 0;
9724 pStats->cExits = 0;
9725 pStats->cMaxExitDistance = 0;
9726 pStats->cReserved = 0;
9727
9728 /*
9729 * Initial decoder init w/ prefetch, then setup setjmp.
9730 */
9731 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9732 if (rcStrict == VINF_SUCCESS)
9733 {
9734#ifdef IEM_WITH_SETJMP
9735 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9736 IEM_TRY_SETJMP(pVCpu, rcStrict)
9737#endif
9738 {
9739#ifdef IN_RING0
9740 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9741#endif
9742 uint32_t cInstructionSinceLastExit = 0;
9743
9744 /*
9745             * The run loop. We limit ourselves to the caller-specified cMaxInstructions.
9746 */
9747 PVM pVM = pVCpu->CTX_SUFF(pVM);
9748 for (;;)
9749 {
9750 /*
9751 * Log the state.
9752 */
9753#ifdef LOG_ENABLED
9754 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9755#endif
9756
9757 /*
9758 * Do the decoding and emulation.
9759 */
9760 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9761
9762 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9763 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9764
9765 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9766 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9767 {
9768 pStats->cExits += 1;
9769 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9770 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9771 cInstructionSinceLastExit = 0;
9772 }
9773
9774 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9775 {
9776 Assert(pVCpu->iem.s.cActiveMappings == 0);
9777 pVCpu->iem.s.cInstructions++;
9778 pStats->cInstructions++;
9779 cInstructionSinceLastExit++;
9780
9781#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9782 /* Perform any VMX nested-guest instruction boundary actions. */
9783 uint64_t fCpu = pVCpu->fLocalForcedActions;
9784 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9785 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9786 { /* likely */ }
9787 else
9788 {
9789 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9790 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9791 fCpu = pVCpu->fLocalForcedActions;
9792 else
9793 {
9794 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9795 break;
9796 }
9797 }
9798#endif
9799 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9800 {
9801#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9802 uint64_t fCpu = pVCpu->fLocalForcedActions;
9803#endif
9804 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9805 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9806 | VMCPU_FF_TLB_FLUSH
9807 | VMCPU_FF_UNHALT );
9808 if (RT_LIKELY( ( ( !fCpu
9809 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9810 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9811 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9812 || pStats->cInstructions < cMinInstructions))
9813 {
9814 if (pStats->cInstructions < cMaxInstructions)
9815 {
9816 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9817 {
9818#ifdef IN_RING0
9819 if ( !fCheckPreemptionPending
9820 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9821#endif
9822 {
9823 Assert(pVCpu->iem.s.cActiveMappings == 0);
9824 iemReInitDecoder(pVCpu);
9825 continue;
9826 }
9827#ifdef IN_RING0
9828 rcStrict = VINF_EM_RAW_INTERRUPT;
9829 break;
9830#endif
9831 }
9832 }
9833 }
9834 Assert(!(fCpu & VMCPU_FF_IEM));
9835 }
9836 Assert(pVCpu->iem.s.cActiveMappings == 0);
9837 }
9838 else if (pVCpu->iem.s.cActiveMappings > 0)
9839 iemMemRollback(pVCpu);
9840 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9841 break;
9842 }
9843 }
9844#ifdef IEM_WITH_SETJMP
9845 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9846 {
9847 if (pVCpu->iem.s.cActiveMappings > 0)
9848 iemMemRollback(pVCpu);
9849 pVCpu->iem.s.cLongJumps++;
9850 }
9851 IEM_CATCH_LONGJMP_END(pVCpu);
9852#endif
9853
9854 /*
9855 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9856 */
9857 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9858 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9859 }
9860 else
9861 {
9862 if (pVCpu->iem.s.cActiveMappings > 0)
9863 iemMemRollback(pVCpu);
9864
9865#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9866 /*
9867 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9868 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9869 */
9870 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9871#endif
9872 }
9873
9874 /*
9875 * Maybe re-enter raw-mode and log.
9876 */
9877 if (rcStrict != VINF_SUCCESS)
9878 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9879 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9880 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9881 return rcStrict;
9882}
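/*
 * Illustrative sketch, not part of the sources: how EM-like code could call
 * IEMExecForExits() and read the returned statistics.  The limits are
 * arbitrary and fWillExit is passed as zero since it is not defined yet (see
 * the @todo above).
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 1 /*cMinInstructions*/, 4096 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("example: ins=%u exits=%u maxdist=%u -> %Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif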
9883
9884
9885/**
9886 * Injects a trap, fault, abort, software interrupt or external interrupt.
9887 *
9888 * The parameter list matches TRPMQueryTrapAll pretty closely.
9889 *
9890 * @returns Strict VBox status code.
9891 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9892 * @param u8TrapNo The trap number.
9893 * @param enmType What type is it (trap/fault/abort), software
9894 * interrupt or hardware interrupt.
9895 * @param uErrCode The error code if applicable.
9896 * @param uCr2 The CR2 value if applicable.
9897 * @param cbInstr The instruction length (only relevant for
9898 * software interrupts).
9899 */
9900VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9901 uint8_t cbInstr)
9902{
9903 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9904#ifdef DBGFTRACE_ENABLED
9905 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9906 u8TrapNo, enmType, uErrCode, uCr2);
9907#endif
9908
9909 uint32_t fFlags;
9910 switch (enmType)
9911 {
9912 case TRPM_HARDWARE_INT:
9913 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9914 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9915 uErrCode = uCr2 = 0;
9916 break;
9917
9918 case TRPM_SOFTWARE_INT:
9919 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9920 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9921 uErrCode = uCr2 = 0;
9922 break;
9923
9924 case TRPM_TRAP:
9925 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
9926 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9927 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9928 if (u8TrapNo == X86_XCPT_PF)
9929 fFlags |= IEM_XCPT_FLAGS_CR2;
9930 switch (u8TrapNo)
9931 {
9932 case X86_XCPT_DF:
9933 case X86_XCPT_TS:
9934 case X86_XCPT_NP:
9935 case X86_XCPT_SS:
9936 case X86_XCPT_PF:
9937 case X86_XCPT_AC:
9938 case X86_XCPT_GP:
9939 fFlags |= IEM_XCPT_FLAGS_ERR;
9940 break;
9941 }
9942 break;
9943
9944 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9945 }
9946
9947 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9948
9949 if (pVCpu->iem.s.cActiveMappings > 0)
9950 iemMemRollback(pVCpu);
9951
9952 return rcStrict;
9953}
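/*
 * Illustrative sketch, not part of the sources: injecting a #GP(0) fault
 * directly via IEMInjectTrap().  The vector and the zero error code are just
 * for the example; hardware interrupts would use TRPM_HARDWARE_INT and
 * software interrupts TRPM_SOFTWARE_INT with a valid cbInstr instead.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleInjectGp0(PVMCPUCC pVCpu)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif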
9954
9955
9956/**
9957 * Injects the active TRPM event.
9958 *
9959 * @returns Strict VBox status code.
9960 * @param pVCpu The cross context virtual CPU structure.
9961 */
9962VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9963{
9964#ifndef IEM_IMPLEMENTS_TASKSWITCH
9965 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9966#else
9967 uint8_t u8TrapNo;
9968 TRPMEVENT enmType;
9969 uint32_t uErrCode;
9970 RTGCUINTPTR uCr2;
9971 uint8_t cbInstr;
9972 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9973 if (RT_FAILURE(rc))
9974 return rc;
9975
9976 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9977 * ICEBP \#DB injection as a special case. */
9978 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9979#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9980 if (rcStrict == VINF_SVM_VMEXIT)
9981 rcStrict = VINF_SUCCESS;
9982#endif
9983#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9984 if (rcStrict == VINF_VMX_VMEXIT)
9985 rcStrict = VINF_SUCCESS;
9986#endif
9987 /** @todo Are there any other codes that imply the event was successfully
9988 * delivered to the guest? See @bugref{6607}. */
9989 if ( rcStrict == VINF_SUCCESS
9990 || rcStrict == VINF_IEM_RAISED_XCPT)
9991 TRPMResetTrap(pVCpu);
9992
9993 return rcStrict;
9994#endif
9995}
9996
9997
9998VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9999{
10000 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10001 return VERR_NOT_IMPLEMENTED;
10002}
10003
10004
10005VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10006{
10007 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10008 return VERR_NOT_IMPLEMENTED;
10009}
10010
10011
10012/**
10013 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10014 *
10015 * This API ASSUMES that the caller has already verified that the guest code is
10016 * allowed to access the I/O port. (The I/O port is in the DX register in the
10017 * guest state.)
10018 *
10019 * @returns Strict VBox status code.
10020 * @param pVCpu The cross context virtual CPU structure.
10021 * @param cbValue The size of the I/O port access (1, 2, or 4).
10022 * @param enmAddrMode The addressing mode.
10023 * @param fRepPrefix Indicates whether a repeat prefix is used
10024 * (doesn't matter which for this instruction).
10025 * @param cbInstr The instruction length in bytes.
10026 * @param iEffSeg The effective segment address.
10027 * @param fIoChecked Whether the access to the I/O port has been
10028 * checked or not. It's typically checked in the
10029 * HM scenario.
10030 */
10031VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10032 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10033{
10034 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10035 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10036
10037 /*
10038 * State init.
10039 */
10040 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10041
10042 /*
10043 * Switch orgy for getting to the right handler.
10044 */
10045 VBOXSTRICTRC rcStrict;
10046 if (fRepPrefix)
10047 {
10048 switch (enmAddrMode)
10049 {
10050 case IEMMODE_16BIT:
10051 switch (cbValue)
10052 {
10053 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10054 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10055 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10056 default:
10057 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10058 }
10059 break;
10060
10061 case IEMMODE_32BIT:
10062 switch (cbValue)
10063 {
10064 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10065 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10066 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10067 default:
10068 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10069 }
10070 break;
10071
10072 case IEMMODE_64BIT:
10073 switch (cbValue)
10074 {
10075 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10076 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10077 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10078 default:
10079 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10080 }
10081 break;
10082
10083 default:
10084 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10085 }
10086 }
10087 else
10088 {
10089 switch (enmAddrMode)
10090 {
10091 case IEMMODE_16BIT:
10092 switch (cbValue)
10093 {
10094 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10095 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10096 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10097 default:
10098 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10099 }
10100 break;
10101
10102 case IEMMODE_32BIT:
10103 switch (cbValue)
10104 {
10105 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10106 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10107 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10108 default:
10109 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10110 }
10111 break;
10112
10113 case IEMMODE_64BIT:
10114 switch (cbValue)
10115 {
10116 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10117 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10118 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10119 default:
10120 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10121 }
10122 break;
10123
10124 default:
10125 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10126 }
10127 }
10128
10129 if (pVCpu->iem.s.cActiveMappings)
10130 iemMemRollback(pVCpu);
10131
10132 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10133}
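/*
 * Illustrative sketch, not part of the sources: handing a 'rep outsb' (F3 6E,
 * two bytes, 16-bit addressing, DS:SI as source) to IEMExecStringIoWrite().
 * The exit-handler framing is hypothetical; the parameter meanings are the
 * ones documented above.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleRepOutsb(PVMCPUCC pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS, false /*fIoChecked*/);
}
#endif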
10134
10135
10136/**
10137 * Interface for HM and EM for executing string I/O IN (read) instructions.
10138 *
10139 * This API ASSUMES that the caller has already verified that the guest code is
10140 * allowed to access the I/O port. (The I/O port is in the DX register in the
10141 * guest state.)
10142 *
10143 * @returns Strict VBox status code.
10144 * @param pVCpu The cross context virtual CPU structure.
10145 * @param cbValue The size of the I/O port access (1, 2, or 4).
10146 * @param enmAddrMode The addressing mode.
10147 * @param fRepPrefix Indicates whether a repeat prefix is used
10148 * (doesn't matter which for this instruction).
10149 * @param cbInstr The instruction length in bytes.
10150 * @param fIoChecked Whether the access to the I/O port has been
10151 * checked or not. It's typically checked in the
10152 * HM scenario.
10153 */
10154VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10155 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10156{
10157 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10158
10159 /*
10160 * State init.
10161 */
10162 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10163
10164 /*
10165 * Switch orgy for getting to the right handler.
10166 */
10167 VBOXSTRICTRC rcStrict;
10168 if (fRepPrefix)
10169 {
10170 switch (enmAddrMode)
10171 {
10172 case IEMMODE_16BIT:
10173 switch (cbValue)
10174 {
10175 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10176 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10177 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10178 default:
10179 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10180 }
10181 break;
10182
10183 case IEMMODE_32BIT:
10184 switch (cbValue)
10185 {
10186 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10187 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10188 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10189 default:
10190 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10191 }
10192 break;
10193
10194 case IEMMODE_64BIT:
10195 switch (cbValue)
10196 {
10197 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10198 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10199 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10200 default:
10201 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10202 }
10203 break;
10204
10205 default:
10206 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10207 }
10208 }
10209 else
10210 {
10211 switch (enmAddrMode)
10212 {
10213 case IEMMODE_16BIT:
10214 switch (cbValue)
10215 {
10216 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10217 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10218 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10219 default:
10220 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10221 }
10222 break;
10223
10224 case IEMMODE_32BIT:
10225 switch (cbValue)
10226 {
10227 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10228 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10229 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10230 default:
10231 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10232 }
10233 break;
10234
10235 case IEMMODE_64BIT:
10236 switch (cbValue)
10237 {
10238 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10239 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10240 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10241 default:
10242 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10243 }
10244 break;
10245
10246 default:
10247 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10248 }
10249 }
10250
10251 if ( pVCpu->iem.s.cActiveMappings == 0
10252 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10253 { /* likely */ }
10254 else
10255 {
10256 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10257 iemMemRollback(pVCpu);
10258 }
10259 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10260}
10261
10262
10263/**
10264 * Interface for rawmode to execute an OUT instruction.
10265 *
10266 * @returns Strict VBox status code.
10267 * @param pVCpu The cross context virtual CPU structure.
10268 * @param cbInstr The instruction length in bytes.
10269 * @param u16Port The port to write to.
10270 * @param fImm Whether the port is specified using an immediate operand or
10271 * using the implicit DX register.
10272 * @param cbReg The register size.
10273 *
10274 * @remarks In ring-0 not all of the state needs to be synced in.
10275 */
10276VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10277{
10278 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10279 Assert(cbReg <= 4 && cbReg != 3);
10280
10281 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10282 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10283 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10284 Assert(!pVCpu->iem.s.cActiveMappings);
10285 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10286}
10287
10288
10289/**
10290 * Interface for rawmode to execute an IN instruction.
10291 *
10292 * @returns Strict VBox status code.
10293 * @param pVCpu The cross context virtual CPU structure.
10294 * @param cbInstr The instruction length in bytes.
10295 * @param u16Port The port to read.
10296 * @param fImm Whether the port is specified using an immediate operand or
10297 * using the implicit DX.
10298 * @param cbReg The register size.
10299 */
10300VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10301{
10302 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10303 Assert(cbReg <= 4 && cbReg != 3);
10304
10305 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10306 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10307 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10308 Assert(!pVCpu->iem.s.cActiveMappings);
10309 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10310}
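/*
 * Illustrative sketch, not part of the sources: using the decoded OUT/IN
 * interfaces for the one byte 'out dx, al' (EE) and 'in al, dx' (EC) forms.
 * fImm is false because the port number comes from DX rather than from an
 * immediate operand; the wrapper function itself is made up.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleDecodedPortIo(PVMCPUCC pVCpu, bool fWrite)
{
    uint16_t const u16Port = (uint16_t)pVCpu->cpum.GstCtx.rdx; /* dx */
    if (fWrite)
        return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
    return IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
}
#endif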
10311
10312
10313/**
10314 * Interface for HM and EM to write to a CRx register.
10315 *
10316 * @returns Strict VBox status code.
10317 * @param pVCpu The cross context virtual CPU structure.
10318 * @param cbInstr The instruction length in bytes.
10319 * @param iCrReg The control register number (destination).
10320 * @param iGReg The general purpose register number (source).
10321 *
10322 * @remarks In ring-0 not all of the state needs to be synced in.
10323 */
10324VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10325{
10326 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10327 Assert(iCrReg < 16);
10328 Assert(iGReg < 16);
10329
10330 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10331 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10332 Assert(!pVCpu->iem.s.cActiveMappings);
10333 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10334}
10335
10336
10337/**
10338 * Interface for HM and EM to read from a CRx register.
10339 *
10340 * @returns Strict VBox status code.
10341 * @param pVCpu The cross context virtual CPU structure.
10342 * @param cbInstr The instruction length in bytes.
10343 * @param iGReg The general purpose register number (destination).
10344 * @param iCrReg The control register number (source).
10345 *
10346 * @remarks In ring-0 not all of the state needs to be synced in.
10347 */
10348VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10349{
10350 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10351 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10352 | CPUMCTX_EXTRN_APIC_TPR);
10353 Assert(iCrReg < 16);
10354 Assert(iGReg < 16);
10355
10356 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10357 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10358 Assert(!pVCpu->iem.s.cActiveMappings);
10359 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10360}
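/*
 * Illustrative sketch, not part of the sources: forwarding an intercepted
 * 'mov cr3, rax' (0F 22 D8) or 'mov rax, cr3' (0F 20 D8) to the decoded CRx
 * interfaces.  The register numbers follow the usual encoding: 0 = RAX,
 * 3 = CR3.  The wrapper itself is made up.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleMovCr3(PVMCPUCC pVCpu, bool fWrite)
{
    if (fWrite)
        return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg=CR3*/, 0 /*iGReg=RAX*/);
    return IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, 0 /*iGReg=RAX*/, 3 /*iCrReg=CR3*/);
}
#endif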
10361
10362
10363/**
10364 * Interface for HM and EM to write to a DRx register.
10365 *
10366 * @returns Strict VBox status code.
10367 * @param pVCpu The cross context virtual CPU structure.
10368 * @param cbInstr The instruction length in bytes.
10369 * @param iDrReg The debug register number (destination).
10370 * @param iGReg The general purpose register number (source).
10371 *
10372 * @remarks In ring-0 not all of the state needs to be synced in.
10373 */
10374VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10375{
10376 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10377 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10378 Assert(iDrReg < 8);
10379 Assert(iGReg < 16);
10380
10381 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10382 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10383 Assert(!pVCpu->iem.s.cActiveMappings);
10384 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10385}
10386
10387
10388/**
10389 * Interface for HM and EM to read from a DRx register.
10390 *
10391 * @returns Strict VBox status code.
10392 * @param pVCpu The cross context virtual CPU structure.
10393 * @param cbInstr The instruction length in bytes.
10394 * @param iGReg The general purpose register number (destination).
10395 * @param iDrReg The debug register number (source).
10396 *
10397 * @remarks In ring-0 not all of the state needs to be synced in.
10398 */
10399VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10400{
10401 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10402 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10403 Assert(iDrReg < 8);
10404 Assert(iGReg < 16);
10405
10406 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10407 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10408 Assert(!pVCpu->iem.s.cActiveMappings);
10409 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10410}
10411
10412
10413/**
10414 * Interface for HM and EM to clear the CR0[TS] bit.
10415 *
10416 * @returns Strict VBox status code.
10417 * @param pVCpu The cross context virtual CPU structure.
10418 * @param cbInstr The instruction length in bytes.
10419 *
10420 * @remarks In ring-0 not all of the state needs to be synced in.
10421 */
10422VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10423{
10424 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10425
10426 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10427 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10428 Assert(!pVCpu->iem.s.cActiveMappings);
10429 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10430}
10431
10432
10433/**
10434 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10435 *
10436 * @returns Strict VBox status code.
10437 * @param pVCpu The cross context virtual CPU structure.
10438 * @param cbInstr The instruction length in bytes.
10439 * @param uValue The value to load into CR0.
10440 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10441 * memory operand. Otherwise pass NIL_RTGCPTR.
10442 *
10443 * @remarks In ring-0 not all of the state needs to be synced in.
10444 */
10445VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10446{
10447 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10448
10449 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10450 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10451 Assert(!pVCpu->iem.s.cActiveMappings);
10452 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10453}
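
/*
 * Illustrative only: the GCPtrEffDst convention for IEMExecDecodedLmsw in a
 * nutshell.  The register form of LMSW has no memory operand, so NIL_RTGCPTR
 * is passed; the memory form passes the guest-linear address of the operand.
 * uMsw, GCPtrEffDst and the surrounding handler are assumed to come from the
 * intercept decoding and are not defined here.
 *
 * @code
 *      // LMSW ax (register operand): no memory address involved.
 *      rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, NIL_RTGCPTR);
 *
 *      // LMSW word [rbx] (memory operand): pass the effective address.
 *      rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
 * @endcode
 */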
10454
10455
10456/**
10457 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10458 *
10459 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10460 *
10461 * @returns Strict VBox status code.
10462 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10463 * @param cbInstr The instruction length in bytes.
10464 * @remarks In ring-0 not all of the state needs to be synced in.
10465 * @thread EMT(pVCpu)
10466 */
10467VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10468{
10469 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10470
10471 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10472 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10473 Assert(!pVCpu->iem.s.cActiveMappings);
10474 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10475}
10476
10477
10478/**
10479 * Interface for HM and EM to emulate the WBINVD instruction.
10480 *
10481 * @returns Strict VBox status code.
10482 * @param pVCpu The cross context virtual CPU structure.
10483 * @param cbInstr The instruction length in bytes.
10484 *
10485 * @remarks In ring-0 not all of the state needs to be synced in.
10486 */
10487VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10488{
10489 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10490
10491 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10492 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10493 Assert(!pVCpu->iem.s.cActiveMappings);
10494 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10495}
10496
10497
10498/**
10499 * Interface for HM and EM to emulate the INVD instruction.
10500 *
10501 * @returns Strict VBox status code.
10502 * @param pVCpu The cross context virtual CPU structure.
10503 * @param cbInstr The instruction length in bytes.
10504 *
10505 * @remarks In ring-0 not all of the state needs to be synced in.
10506 */
10507VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10508{
10509 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10510
10511 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10512 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10513 Assert(!pVCpu->iem.s.cActiveMappings);
10514 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10515}
10516
10517
10518/**
10519 * Interface for HM and EM to emulate the INVLPG instruction.
10520 *
10521 * @returns Strict VBox status code.
10522 * @retval VINF_PGM_SYNC_CR3
10523 *
10524 * @param pVCpu The cross context virtual CPU structure.
10525 * @param cbInstr The instruction length in bytes.
10526 * @param GCPtrPage The effective address of the page to invalidate.
10527 *
10528 * @remarks In ring-0 not all of the state needs to be synced in.
10529 */
10530VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10531{
10532 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10533
10534 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10535 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10536 Assert(!pVCpu->iem.s.cActiveMappings);
10537 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10538}
10539
10540
10541/**
10542 * Interface for HM and EM to emulate the INVPCID instruction.
10543 *
10544 * @returns Strict VBox status code.
10545 * @retval VINF_PGM_SYNC_CR3
10546 *
10547 * @param pVCpu The cross context virtual CPU structure.
10548 * @param cbInstr The instruction length in bytes.
10549 * @param iEffSeg The effective segment register.
10550 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10551 * @param uType The invalidation type.
10552 *
10553 * @remarks In ring-0 not all of the state needs to be synced in.
10554 */
10555VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10556 uint64_t uType)
10557{
10558 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10559
10560 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10561 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10562 Assert(!pVCpu->iem.s.cActiveMappings);
10563 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10564}
10565
10566
10567/**
10568 * Interface for HM and EM to emulate the CPUID instruction.
10569 *
10570 * @returns Strict VBox status code.
10571 *
10572 * @param pVCpu The cross context virtual CPU structure.
10573 * @param cbInstr The instruction length in bytes.
10574 *
10575 * @remarks Not all of the state needs to be synced in, the usual set plus RAX and RCX.
10576 */
10577VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10578{
10579 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10580 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10581
10582 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10583 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10584 Assert(!pVCpu->iem.s.cActiveMappings);
10585 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10586}
10587
10588
10589/**
10590 * Interface for HM and EM to emulate the RDPMC instruction.
10591 *
10592 * @returns Strict VBox status code.
10593 *
10594 * @param pVCpu The cross context virtual CPU structure.
10595 * @param cbInstr The instruction length in bytes.
10596 *
10597 * @remarks Not all of the state needs to be synced in.
10598 */
10599VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10600{
10601 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10602 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10603
10604 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10605 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10606 Assert(!pVCpu->iem.s.cActiveMappings);
10607 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10608}
10609
10610
10611/**
10612 * Interface for HM and EM to emulate the RDTSC instruction.
10613 *
10614 * @returns Strict VBox status code.
10615 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10616 *
10617 * @param pVCpu The cross context virtual CPU structure.
10618 * @param cbInstr The instruction length in bytes.
10619 *
10620 * @remarks Not all of the state needs to be synced in.
10621 */
10622VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10623{
10624 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10625 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10626
10627 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10628 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10629 Assert(!pVCpu->iem.s.cActiveMappings);
10630 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10631}
10632
10633
10634/**
10635 * Interface for HM and EM to emulate the RDTSCP instruction.
10636 *
10637 * @returns Strict VBox status code.
10638 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10639 *
10640 * @param pVCpu The cross context virtual CPU structure.
10641 * @param cbInstr The instruction length in bytes.
10642 *
10643 * @remarks Not all of the state needs to be synced in. Recommended
10644 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10645 */
10646VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10647{
10648 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10649 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10650
10651 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10652 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10653 Assert(!pVCpu->iem.s.cActiveMappings);
10654 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10655}
10656
10657
10658/**
10659 * Interface for HM and EM to emulate the RDMSR instruction.
10660 *
10661 * @returns Strict VBox status code.
10662 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10663 *
10664 * @param pVCpu The cross context virtual CPU structure.
10665 * @param cbInstr The instruction length in bytes.
10666 *
10667 * @remarks Not all of the state needs to be synced in. Requires RCX and
10668 * (currently) all MSRs.
10669 */
10670VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10671{
10672 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10673 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10674
10675 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10676 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10677 Assert(!pVCpu->iem.s.cActiveMappings);
10678 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10679}
10680
10681
10682/**
10683 * Interface for HM and EM to emulate the WRMSR instruction.
10684 *
10685 * @returns Strict VBox status code.
10686 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10687 *
10688 * @param pVCpu The cross context virtual CPU structure.
10689 * @param cbInstr The instruction length in bytes.
10690 *
10691 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10692 * and (currently) all MSRs.
10693 */
10694VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10695{
10696 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10697 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10698 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10699
10700 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10701 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10702 Assert(!pVCpu->iem.s.cActiveMappings);
10703 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10704}
10705
10706
10707/**
10708 * Interface for HM and EM to emulate the MONITOR instruction.
10709 *
10710 * @returns Strict VBox status code.
10711 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10712 *
10713 * @param pVCpu The cross context virtual CPU structure.
10714 * @param cbInstr The instruction length in bytes.
10715 *
10716 * @remarks Not all of the state needs to be synced in.
10717 * @remarks ASSUMES the default segment of DS is used and that no segment
10718 * override prefixes are present.
10719 */
10720VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10721{
10722 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10723 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10724
10725 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10726 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10727 Assert(!pVCpu->iem.s.cActiveMappings);
10728 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10729}
10730
10731
10732/**
10733 * Interface for HM and EM to emulate the MWAIT instruction.
10734 *
10735 * @returns Strict VBox status code.
10736 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10737 *
10738 * @param pVCpu The cross context virtual CPU structure.
10739 * @param cbInstr The instruction length in bytes.
10740 *
10741 * @remarks Not all of the state needs to be synced in.
10742 */
10743VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10744{
10745 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10746 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10747
10748 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10749 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10750 Assert(!pVCpu->iem.s.cActiveMappings);
10751 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10752}
10753
10754
10755/**
10756 * Interface for HM and EM to emulate the HLT instruction.
10757 *
10758 * @returns Strict VBox status code.
10759 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10760 *
10761 * @param pVCpu The cross context virtual CPU structure.
10762 * @param cbInstr The instruction length in bytes.
10763 *
10764 * @remarks Not all of the state needs to be synced in.
10765 */
10766VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10767{
10768 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10769
10770 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10771 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10772 Assert(!pVCpu->iem.s.cActiveMappings);
10773 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10774}
10775
10776
10777/**
10778 * Checks if IEM is in the process of delivering an event (interrupt or
10779 * exception).
10780 *
10781 * @returns true if we're in the process of raising an interrupt or exception,
10782 * false otherwise.
10783 * @param pVCpu The cross context virtual CPU structure.
10784 * @param puVector Where to store the vector associated with the
10785 * currently delivered event, optional.
10786 * @param pfFlags Where to store the event delivery flags (see
10787 * IEM_XCPT_FLAGS_XXX), optional.
10788 * @param puErr Where to store the error code associated with the
10789 * event, optional.
10790 * @param puCr2 Where to store the CR2 associated with the event,
10791 * optional.
10792 * @remarks The caller should check the flags to determine if the error code and
10793 * CR2 are valid for the event.
10794 */
10795VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10796{
10797 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10798 if (fRaisingXcpt)
10799 {
10800 if (puVector)
10801 *puVector = pVCpu->iem.s.uCurXcpt;
10802 if (pfFlags)
10803 *pfFlags = pVCpu->iem.s.fCurXcpt;
10804 if (puErr)
10805 *puErr = pVCpu->iem.s.uCurXcptErr;
10806 if (puCr2)
10807 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10808 }
10809 return fRaisingXcpt;
10810}
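
/*
 * Illustrative only: a minimal sketch of querying the event IEM is currently
 * delivering, following the remark above about checking the flags before
 * trusting the error code and CR2.  The assumption that IEM_XCPT_FLAGS_ERR
 * marks the error code as valid, as well as the log text, are for the example
 * only.
 *
 * @code
 *      uint8_t  uVector = 0;
 *      uint32_t fFlags  = 0;
 *      uint32_t uErr    = 0;
 *      uint64_t uCr2    = 0;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *      {
 *          if (fFlags & IEM_XCPT_FLAGS_ERR)   // error code only valid with this flag
 *              Log(("IEM delivering vector %#x, err=%#x\n", uVector, uErr));
 *          else
 *              Log(("IEM delivering vector %#x\n", uVector));
 *      }
 * @endcode
 */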
10811
10812#ifdef IN_RING3
10813
10814/**
10815 * Handles the unlikely and probably fatal merge cases.
10816 *
10817 * @returns Merged status code.
10818 * @param rcStrict Current EM status code.
10819 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10820 * with @a rcStrict.
10821 * @param iMemMap The memory mapping index. For error reporting only.
10822 * @param pVCpu The cross context virtual CPU structure of the calling
10823 * thread, for error reporting only.
10824 */
10825DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10826 unsigned iMemMap, PVMCPUCC pVCpu)
10827{
10828 if (RT_FAILURE_NP(rcStrict))
10829 return rcStrict;
10830
10831 if (RT_FAILURE_NP(rcStrictCommit))
10832 return rcStrictCommit;
10833
10834 if (rcStrict == rcStrictCommit)
10835 return rcStrictCommit;
10836
10837 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10838 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10839 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10840 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10841 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10842 return VERR_IOM_FF_STATUS_IPE;
10843}
10844
10845
10846/**
10847 * Helper for IOMR3ProcessForceFlag.
10848 *
10849 * @returns Merged status code.
10850 * @param rcStrict Current EM status code.
10851 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10852 * with @a rcStrict.
10853 * @param iMemMap The memory mapping index. For error reporting only.
10854 * @param pVCpu The cross context virtual CPU structure of the calling
10855 * thread, for error reporting only.
10856 */
10857DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10858{
10859 /* Simple. */
10860 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10861 return rcStrictCommit;
10862
10863 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10864 return rcStrict;
10865
10866 /* EM scheduling status codes. */
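/* Lower EM status values have higher priority, so keep the smaller (more urgent) of the two. */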
10867 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10868 && rcStrict <= VINF_EM_LAST))
10869 {
10870 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10871 && rcStrictCommit <= VINF_EM_LAST))
10872 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10873 }
10874
10875 /* Unlikely */
10876 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10877}
10878
10879
10880/**
10881 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10882 *
10883 * @returns Merge between @a rcStrict and what the commit operation returned.
10884 * @param pVM The cross context VM structure.
10885 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10886 * @param rcStrict The status code returned by ring-0 or raw-mode.
10887 */
10888VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10889{
10890 /*
10891 * Reset the pending commit.
10892 */
10893 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10894 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10895 ("%#x %#x %#x\n",
10896 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10897 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10898
10899 /*
10900 * Commit the pending bounce buffers (usually just one).
10901 */
10902 unsigned cBufs = 0;
10903 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10904 while (iMemMap-- > 0)
10905 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10906 {
10907 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10908 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10909 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10910
10911 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10912 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10913 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10914
10915 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10916 {
10917 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10918 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10919 pbBuf,
10920 cbFirst,
10921 PGMACCESSORIGIN_IEM);
10922 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10923 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10924 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10925 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10926 }
10927
10928 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10929 {
10930 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10932 pbBuf + cbFirst,
10933 cbSecond,
10934 PGMACCESSORIGIN_IEM);
10935 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10936 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10937 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10938 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10939 }
10940 cBufs++;
10941 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10942 }
10943
10944 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10945 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10946 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10947 pVCpu->iem.s.cActiveMappings = 0;
10948 return rcStrict;
10949}
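
/*
 * Illustrative only: IEMR3ProcessForceFlag is meant to be called from ring-3
 * force-flag processing when VMCPU_FF_IEM is set, roughly along these lines.
 * The surrounding loop and the rcStrict variable are assumptions for the
 * example, not the actual EM code.
 *
 * @code
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */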
10950
10951#endif /* IN_RING3 */
10952