VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@102727

Last change on this file since 102727 was 102686, checked in by vboxsync, 14 months ago

VMM/IEM: Fixed mixup in IEMTlbInvalidateAllPhysicalAllCpus that would cause trouble with SMP VMs. bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 442.1 KB
1/* $Id: IEMAll.cpp 102686 2023-12-21 22:50:16Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of the memory-related detail logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
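
/*
 * Editor's illustrative sketch (standalone, not IEM code): the DR7 decoding
 * that PROCESS_ONE_BP above performs, written out in plain C without the
 * X86_DR7_* macros.  The bit positions are the architectural ones: the L/G
 * enable bits for breakpoint i sit at bits 2*i and 2*i+1, and its two R/W
 * type bits sit at bit 16 + i*4.  The SKETCH_BRK_* flag values are
 * placeholders, not the IEM_F_PENDING_BRK_* values.
 */
#include <stdint.h>

#define SKETCH_BRK_INSTR  0x1u  /* instruction breakpoint armed */
#define SKETCH_BRK_DATA   0x2u  /* data read/write breakpoint armed */
#define SKETCH_BRK_IO     0x4u  /* I/O breakpoint armed (requires CR4.DE) */

static uint32_t sketchCalcPendingBrkFlags(uint64_t fDr7)
{
    uint32_t fPending = 0;
    for (unsigned iBp = 0; iBp < 4; iBp++)
    {
        /* Local (bit 2*i) or global (bit 2*i + 1) enable bit set? */
        if (fDr7 & (0x3u << (iBp * 2)))
        {
            switch ((fDr7 >> (16 + iBp * 4)) & 0x3) /* the R/W type field */
            {
                case 0: fPending |= SKETCH_BRK_INSTR; break; /* execute only */
                case 1:                                      /* data writes */
                case 3: fPending |= SKETCH_BRK_DATA;  break; /* data reads/writes */
                case 2: fPending |= SKETCH_BRK_IO;    break; /* I/O accesses */
            }
        }
    }
    return fPending;
}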
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
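
/*
 * Editor's illustrative sketch (standalone, not IEM code): the default
 * operand-size rule the decoder initialization above applies.  In 16-bit and
 * 32-bit code the default operand size equals the code segment's mode, while
 * 64-bit mode keeps a 32-bit default operand size (REX.W, or instructions
 * that default to 64-bit such as near branches and push/pop, widen it).
 */
enum SKETCHMODE { SKETCHMODE_16BIT = 16, SKETCHMODE_32BIT = 32, SKETCHMODE_64BIT = 64 };

static unsigned sketchDefaultOperandSize(enum SKETCHMODE enmCpuMode)
{
    return enmCpuMode == SKETCHMODE_64BIT ? 32u : (unsigned)enmCpuMode;
}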
344
345
346/**
347 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
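
/*
 * Editor's illustrative sketch (standalone, not IEM code): the buffer-reuse
 * check performed by iemReInitDecoder above, reduced to a plain struct.  The
 * idea is to keep the previously mapped code page if the new PC still falls
 * inside it, exposing at most 15 more bytes (the x86 maximum instruction
 * length) past the new instruction start.  Field names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

typedef struct SKETCHCODEBUF
{
    uint64_t uBufPc;        /* linear address the buffer starts at */
    uint16_t cbTotal;       /* total bytes backed by the mapping */
    uint16_t cbUsable;      /* bytes the decoder may touch right now */
    uint16_t offNextByte;   /* current fetch position */
    uint16_t offInstrStart; /* start of the instruction being decoded */
} SKETCHCODEBUF;

static bool sketchTryReuseCodeBuf(SKETCHCODEBUF *pBuf, uint64_t uNewPc)
{
    uint64_t const off = uNewPc - pBuf->uBufPc;
    if (off >= pBuf->cbTotal)
        return false;                           /* the PC left the mapped window */
    pBuf->offNextByte   = (uint16_t)off;
    pBuf->offInstrStart = (uint16_t)off;
    pBuf->cbUsable      = (uint16_t)(off + 15 <= pBuf->cbTotal ? off + 15 : pBuf->cbTotal);
    return true;
}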
455
456
457
458/**
459 * Prefetch opcodes the first time when starting executing.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
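
/*
 * Editor's illustrative sketch (standalone, not IEM code): the "how much can
 * we prefetch" arithmetic used above, in isolation.  A read is clamped to the
 * bytes remaining on the current 4 KiB guest page and to the size of the
 * opcode buffer, so only the first page of an instruction is fetched up
 * front; a later fetch deals with the page-crossing tail.  The constants are
 * illustrative stand-ins for GUEST_PAGE_SIZE / GUEST_PAGE_OFFSET_MASK.
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE        0x1000u
#define SKETCH_PAGE_OFFSET_MASK 0x0fffu

static uint32_t sketchClampPrefetch(uint64_t uPc, uint32_t cbWanted, size_t cbOpcodeBuf)
{
    uint32_t const cbLeftOnPage = SKETCH_PAGE_SIZE - (uint32_t)(uPc & SKETCH_PAGE_OFFSET_MASK);
    uint32_t cb = cbWanted;
    if (cb > cbLeftOnPage)
        cb = cbLeftOnPage;          /* do not cross the page boundary */
    if (cb > cbOpcodeBuf)
        cb = (uint32_t)cbOpcodeBuf; /* do not overrun the opcode buffer */
    return cb;
}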
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
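
/*
 * Editor's illustrative sketch (standalone, not IEM code): the revision-tag
 * trick used by IEMTlbInvalidateAll above.  Each TLB tag stores the page
 * number OR'ed with a revision counter kept in the high bits; bumping the
 * revision makes every existing tag mismatch, invalidating the whole TLB in
 * O(1).  Only when the counter wraps to zero must the entries be scrubbed.
 * Sizes and shifts below are illustrative, not IEM's actual layout.
 */
#include <stdint.h>
#include <string.h>

#define SKETCH_TLB_ENTRIES   256u
#define SKETCH_TLB_REV_INCR  (UINT64_C(1) << 36) /* revision lives above the page number */

typedef struct SKETCHTLB
{
    uint64_t uRevision;
    uint64_t auTags[SKETCH_TLB_ENTRIES];
} SKETCHTLB;

static void sketchTlbInvalidateAll(SKETCHTLB *pTlb)
{
    pTlb->uRevision += SKETCH_TLB_REV_INCR;
    if (pTlb->uRevision == 0) /* rolled over: revision values would repeat, so scrub for real */
    {
        pTlb->uRevision = SKETCH_TLB_REV_INCR;
        memset(pTlb->auTags, 0, sizeof(pTlb->auTags));
    }
}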
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
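
/*
 * Editor's illustrative sketch (standalone, not IEM code): invalidating a
 * single page in a direct-mapped TLB, as IEMTlbInvalidatePage does above.
 * The slot index is derived from the page number, and the stored tag must
 * match the page number combined with the current revision before it is
 * cleared.  Names and sizes are illustrative only.
 */
#include <stdint.h>

#define SKETCH2_TLB_ENTRIES 256u
#define SKETCH2_PAGE_SHIFT  12

typedef struct SKETCHTLB2
{
    uint64_t uRevision;
    uint64_t auTags[SKETCH2_TLB_ENTRIES];
} SKETCHTLB2;

static void sketchTlbInvalidatePage(SKETCHTLB2 *pTlb, uint64_t uAddr)
{
    uint64_t const uPage = uAddr >> SKETCH2_PAGE_SHIFT;              /* tag sans revision */
    uint32_t const idx   = (uint32_t)(uPage % SKETCH2_TLB_ENTRIES);  /* direct-mapped slot */
    if (pTlb->auTags[idx] == (uPage | pTlb->uRevision))
        pTlb->auTags[idx] = 0;                                       /* 0 never matches a live tag */
}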
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs the slow way following a revision rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller != pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
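
/*
 * Editor's illustrative sketch (standalone, not IEM code): the lock-free
 * cross-CPU update pattern used above, expressed with C11 atomics instead of
 * ASMAtomicCmpXchgU64.  A remote updater only installs the bumped revision if
 * the owning EMT has not changed it in the meantime; losing the race is fine,
 * because the owner's own bump already achieved the invalidation.  The
 * increment value is an illustrative assumption.
 */
#include <stdatomic.h>
#include <stdint.h>

#define SKETCH_PHYS_REV_INCR (UINT64_C(1) << 10) /* illustrative increment */

static void sketchBumpRemotePhysRev(_Atomic uint64_t *puPhysRev)
{
    uint64_t uOld = atomic_load_explicit(puPhysRev, memory_order_relaxed);
    uint64_t uNew = uOld + SKETCH_PHYS_REV_INCR;
    /* Single attempt, mirroring the ASMAtomicCmpXchgU64 usage above: if the
       owning EMT raced us, its value wins and we simply do nothing more. */
    atomic_compare_exchange_strong(puPhysRev, &uOld, uNew);
}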
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.cbInstrBufTotal = 0;
819 RT_NOREF(cbInstr);
820#else
821 RT_NOREF(pVCpu, cbInstr);
822#endif
823}
824
825
826
827#ifdef IEM_WITH_CODE_TLB
828
829/**
830 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
831 * failure and jumps.
832 *
833 * We end up here for a number of reasons:
834 * - pbInstrBuf isn't yet initialized.
835 * - Advancing beyond the buffer boundary (e.g. cross page).
836 * - Advancing beyond the CS segment limit.
837 * - Fetching from non-mappable page (e.g. MMIO).
838 *
839 * @param pVCpu The cross context virtual CPU structure of the
840 * calling thread.
841 * @param pvDst Where to return the bytes.
842 * @param cbDst Number of bytes to read. A value of zero is
843 * allowed for initializing pbInstrBuf (the
844 * recompiler does this). In this case it is best
845 * to set pbInstrBuf to NULL prior to the call.
846 */
847void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
848{
849# ifdef IN_RING3
850 for (;;)
851 {
852 Assert(cbDst <= 8);
853 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
854
855 /*
856 * We might have a partial buffer match, deal with that first to make the
857 * rest simpler. This is the first part of the cross page/buffer case.
858 */
859 if (pVCpu->iem.s.pbInstrBuf != NULL)
860 {
861 if (offBuf < pVCpu->iem.s.cbInstrBuf)
862 {
863 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
864 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
865 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
866
867 cbDst -= cbCopy;
868 pvDst = (uint8_t *)pvDst + cbCopy;
869 offBuf += cbCopy;
870 pVCpu->iem.s.offInstrNextByte += offBuf;
871 }
872 }
873
874 /*
875 * Check segment limit, figuring how much we're allowed to access at this point.
876 *
877 * We will fault immediately if RIP is past the segment limit / in non-canonical
878 * territory. If we do continue, there are one or more bytes to read before we
879 * end up in trouble and we need to do that first before faulting.
880 */
881 RTGCPTR GCPtrFirst;
882 uint32_t cbMaxRead;
883 if (IEM_IS_64BIT_CODE(pVCpu))
884 {
885 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
886 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
887 { /* likely */ }
888 else
889 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
890 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
891 }
892 else
893 {
894 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
895 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
896 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
897 { /* likely */ }
898 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
899 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
900 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
901 if (cbMaxRead != 0)
902 { /* likely */ }
903 else
904 {
905 /* Overflowed because address is 0 and limit is max. */
906 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
907 cbMaxRead = X86_PAGE_SIZE;
908 }
909 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
910 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
911 if (cbMaxRead2 < cbMaxRead)
912 cbMaxRead = cbMaxRead2;
913 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
914 }
915
916 /*
917 * Get the TLB entry for this piece of code.
918 */
919 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
920 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
921 if (pTlbe->uTag == uTag)
922 {
923 /* likely when executing lots of code, otherwise unlikely */
924# ifdef VBOX_WITH_STATISTICS
925 pVCpu->iem.s.CodeTlb.cTlbHits++;
926# endif
927 }
928 else
929 {
930 pVCpu->iem.s.CodeTlb.cTlbMisses++;
931 PGMPTWALK Walk;
932 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
933 if (RT_FAILURE(rc))
934 {
935#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
936 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
937 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
938#endif
939 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
940 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
941 }
942
943 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
944 Assert(Walk.fSucceeded);
945 pTlbe->uTag = uTag;
946 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
947 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
948 pTlbe->GCPhys = Walk.GCPhys;
949 pTlbe->pbMappingR3 = NULL;
950 }
951
952 /*
953 * Check TLB page table level access flags.
954 */
955 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
956 {
957 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
958 {
959 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
960 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
961 }
962 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
963 {
964 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
965 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
966 }
967 }
968
969 /*
970 * Look up the physical page info if necessary.
971 */
972 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
973 { /* not necessary */ }
974 else
975 {
976 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
977 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
978 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
979 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
980 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
981 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
982 { /* likely */ }
983 else
984 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
985 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
986 | IEMTLBE_F_NO_MAPPINGR3
987 | IEMTLBE_F_PG_NO_READ
988 | IEMTLBE_F_PG_NO_WRITE
989 | IEMTLBE_F_PG_UNASSIGNED
990 | IEMTLBE_F_PG_CODE_PAGE);
991 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
992 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
993 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
994 }
995
996# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
997 /*
998 * Try to do a direct read using the pbMappingR3 pointer.
999 */
1000 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1001 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1002 {
1003 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1004 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1005 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1006 {
1007 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1008 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1009 }
1010 else
1011 {
1012 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1013 if (cbInstr + (uint32_t)cbDst <= 15)
1014 {
1015 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1016 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1017 }
1018 else
1019 {
1020 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1021 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1022 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1023 }
1024 }
1025 if (cbDst <= cbMaxRead)
1026 {
1027 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1028 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1029
1030 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1031 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1032 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1033 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1034 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1035 return;
1036 }
1037 pVCpu->iem.s.pbInstrBuf = NULL;
1038
1039 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1040 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1041 }
1042# else
1043# error "refactor as needed"
1044 /*
1045 * If there is no special read handling, we can read a bit more and
1046 * put it in the prefetch buffer.
1047 */
1048 if ( cbDst < cbMaxRead
1049 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1050 {
1051 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1052 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1053 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1054 { /* likely */ }
1055 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1056 {
1057 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1058 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1059 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1060 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1061 }
1062 else
1063 {
1064 Log((RT_SUCCESS(rcStrict)
1065 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1066 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1067 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1068 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1069 }
1070 }
1071# endif
1072 /*
1073 * Special read handling, so only read exactly what's needed.
1074 * This is a highly unlikely scenario.
1075 */
1076 else
1077 {
1078 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1079
1080 /* Check instruction length. */
1081 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1082 if (RT_LIKELY(cbInstr + cbDst <= 15))
1083 { /* likely */ }
1084 else
1085 {
1086 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1087 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1088 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1089 }
1090
1091 /* Do the reading. */
1092 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1093 if (cbToRead > 0)
1094 {
1095 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1096 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1097 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1098 { /* likely */ }
1099 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1100 {
1101 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1102 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1103 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1104 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1105 }
1106 else
1107 {
1108 Log((RT_SUCCESS(rcStrict)
1109 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1110 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1111 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1112 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1113 }
1114 }
1115
1116 /* Update the state and probably return. */
1117 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1118 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1119 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1120
1121 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1122 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1123 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1124 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1125 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1126 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1127 pVCpu->iem.s.pbInstrBuf = NULL;
1128 if (cbToRead == cbDst)
1129 return;
1130 }
1131
1132 /*
1133 * More to read, loop.
1134 */
1135 cbDst -= cbMaxRead;
1136 pvDst = (uint8_t *)pvDst + cbMaxRead;
1137 }
1138# else /* !IN_RING3 */
1139 RT_NOREF(pvDst, cbDst);
1140 if (pvDst || cbDst)
1141 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1142# endif /* !IN_RING3 */
1143}
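
/*
 * Editor's illustrative sketch (standalone, not IEM code): the page-crossing
 * split performed by the fetch loop above.  Each iteration copies at most the
 * bytes left on the current 4 KiB page from a per-page source buffer, then
 * advances the destination and the linear address and loops for the
 * remainder.  The sketchMapGuestPage callback type stands in for the TLB +
 * PGM lookup and is an assumption of this sketch, not an IEM interface.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SKETCH_PAGE_SIZE        0x1000u
#define SKETCH_PAGE_OFFSET_MASK 0x0fffu

typedef const uint8_t *(*PFNSKETCHMAPPAGE)(uint64_t uPageAddr, void *pvUser);

static void sketchFetchBytes(uint64_t uAddr, void *pvDst, size_t cbDst,
                             PFNSKETCHMAPPAGE pfnMapPage, void *pvUser)
{
    uint8_t *pbDst = (uint8_t *)pvDst;
    while (cbDst > 0)
    {
        uint32_t const offPage = (uint32_t)(uAddr & SKETCH_PAGE_OFFSET_MASK);
        size_t         cbChunk = SKETCH_PAGE_SIZE - offPage;   /* bytes left on this page */
        if (cbChunk > cbDst)
            cbChunk = cbDst;
        const uint8_t *pbPage = pfnMapPage(uAddr & ~(uint64_t)SKETCH_PAGE_OFFSET_MASK, pvUser);
        memcpy(pbDst, &pbPage[offPage], cbChunk);
        pbDst += cbChunk;
        uAddr += cbChunk;
        cbDst -= cbChunk;
    }
}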
1144
1145#else /* !IEM_WITH_CODE_TLB */
1146
1147/**
1148 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1149 * exception if it fails.
1150 *
1151 * @returns Strict VBox status code.
1152 * @param pVCpu The cross context virtual CPU structure of the
1153 * calling thread.
1154 * @param cbMin The minimum number of bytes relative to offOpcode
1155 * that must be read.
1156 */
1157VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1158{
1159 /*
1160 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1161 *
1162 * First translate CS:rIP to a physical address.
1163 */
1164 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1165 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1166 uint8_t const cbLeft = cbOpcode - offOpcode;
1167 Assert(cbLeft < cbMin);
1168 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1169
1170 uint32_t cbToTryRead;
1171 RTGCPTR GCPtrNext;
1172 if (IEM_IS_64BIT_CODE(pVCpu))
1173 {
1174 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1175 if (!IEM_IS_CANONICAL(GCPtrNext))
1176 return iemRaiseGeneralProtectionFault0(pVCpu);
1177 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1178 }
1179 else
1180 {
1181 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1182 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1183 GCPtrNext32 += cbOpcode;
1184 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1185 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1186 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1187 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1188 if (!cbToTryRead) /* overflowed */
1189 {
1190 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1191 cbToTryRead = UINT32_MAX;
1192 /** @todo check out wrapping around the code segment. */
1193 }
1194 if (cbToTryRead < cbMin - cbLeft)
1195 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1196 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1197
1198 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1199 if (cbToTryRead > cbLeftOnPage)
1200 cbToTryRead = cbLeftOnPage;
1201 }
1202
1203 /* Restrict to opcode buffer space.
1204
1205 We're making ASSUMPTIONS here based on work done previously in
1206 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1207 be fetched in case of an instruction crossing two pages. */
1208 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1209 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1210 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1211 { /* likely */ }
1212 else
1213 {
1214 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1215 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1216 return iemRaiseGeneralProtectionFault0(pVCpu);
1217 }
1218
1219 PGMPTWALK Walk;
1220 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1221 if (RT_FAILURE(rc))
1222 {
1223 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1224#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1225 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1226 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1227#endif
1228 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1229 }
1230 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1231 {
1232 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1233#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1234 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1235 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1236#endif
1237 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1238 }
1239 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1240 {
1241 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1242#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1243 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1244 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1245#endif
1246 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1247 }
1248 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1249 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1250 /** @todo Check reserved bits and such stuff. PGM is better at doing
1251 * that, so do it when implementing the guest virtual address
1252 * TLB... */
1253
1254 /*
1255 * Read the bytes at this address.
1256 *
1257 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1258 * and since PATM should only patch the start of an instruction there
1259 * should be no need to check again here.
1260 */
1261 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1262 {
1263 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1264 cbToTryRead, PGMACCESSORIGIN_IEM);
1265 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1266 { /* likely */ }
1267 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1268 {
1269 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1270 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1271 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1272 }
1273 else
1274 {
1275 Log((RT_SUCCESS(rcStrict)
1276 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1277 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1278 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1279 return rcStrict;
1280 }
1281 }
1282 else
1283 {
1284 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1285 if (RT_SUCCESS(rc))
1286 { /* likely */ }
1287 else
1288 {
1289 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1290 return rc;
1291 }
1292 }
1293 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1294 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1295
1296 return VINF_SUCCESS;
1297}
1298
1299#endif /* !IEM_WITH_CODE_TLB */
1300#ifndef IEM_WITH_SETJMP
1301
1302/**
1303 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1304 *
1305 * @returns Strict VBox status code.
1306 * @param pVCpu The cross context virtual CPU structure of the
1307 * calling thread.
1308 * @param pb Where to return the opcode byte.
1309 */
1310VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1311{
1312 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1313 if (rcStrict == VINF_SUCCESS)
1314 {
1315 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1316 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1317 pVCpu->iem.s.offOpcode = offOpcode + 1;
1318 }
1319 else
1320 *pb = 0;
1321 return rcStrict;
1322}
1323
1324#else /* IEM_WITH_SETJMP */
1325
1326/**
1327 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1328 *
1329 * @returns The opcode byte.
1330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1331 */
1332uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1333{
1334# ifdef IEM_WITH_CODE_TLB
1335 uint8_t u8;
1336 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1337 return u8;
1338# else
1339 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1340 if (rcStrict == VINF_SUCCESS)
1341 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1342 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1343# endif
1344}
1345
1346#endif /* IEM_WITH_SETJMP */
1347
1348#ifndef IEM_WITH_SETJMP
1349
1350/**
1351 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1352 *
1353 * @returns Strict VBox status code.
1354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1355 * @param pu16 Where to return the opcode word (sign-extended byte).
1356 */
1357VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1358{
1359 uint8_t u8;
1360 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1361 if (rcStrict == VINF_SUCCESS)
1362 *pu16 = (int8_t)u8;
1363 return rcStrict;
1364}
1365
1366
1367/**
1368 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1369 *
1370 * @returns Strict VBox status code.
1371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1372 * @param pu32 Where to return the opcode dword.
1373 */
1374VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1375{
1376 uint8_t u8;
1377 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1378 if (rcStrict == VINF_SUCCESS)
1379 *pu32 = (int8_t)u8;
1380 return rcStrict;
1381}
1382
1383
1384/**
1385 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1386 *
1387 * @returns Strict VBox status code.
1388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1389 * @param pu64 Where to return the opcode qword.
1390 */
1391VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1392{
1393 uint8_t u8;
1394 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1395 if (rcStrict == VINF_SUCCESS)
1396 *pu64 = (int8_t)u8;
1397 return rcStrict;
1398}
1399
1400#endif /* !IEM_WITH_SETJMP */
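
/*
 * Editor's illustrative sketch (standalone, not IEM code): the sign-extension
 * idiom used by the S8SxU16/U32/U64 helpers above.  Casting the raw byte to
 * int8_t first and then widening lets the compiler replicate the sign bit, so
 * 0x80 becomes 0xFF80 / 0xFFFFFF80 / 0xFFFFFFFFFFFFFF80.
 */
#include <stdint.h>

static uint16_t sketchSignExtendU8ToU16(uint8_t u8) { return (uint16_t)(int16_t)(int8_t)u8; }
static uint32_t sketchSignExtendU8ToU32(uint8_t u8) { return (uint32_t)(int32_t)(int8_t)u8; }
static uint64_t sketchSignExtendU8ToU64(uint8_t u8) { return (uint64_t)(int64_t)(int8_t)u8; }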
1401
1402
1403#ifndef IEM_WITH_SETJMP
1404
1405/**
1406 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1407 *
1408 * @returns Strict VBox status code.
1409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1410 * @param pu16 Where to return the opcode word.
1411 */
1412VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1413{
1414 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1415 if (rcStrict == VINF_SUCCESS)
1416 {
1417 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1418# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1419 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1420# else
1421 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1422# endif
1423 pVCpu->iem.s.offOpcode = offOpcode + 2;
1424 }
1425 else
1426 *pu16 = 0;
1427 return rcStrict;
1428}
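
/*
 * Editor's illustrative sketch (standalone, not IEM code): the two strategies
 * the U16/U32 fetchers above choose between.  With IEM_USE_UNALIGNED_DATA_ACCESS
 * the bytes are read with a single (unaligned) load; otherwise the
 * little-endian value is assembled byte by byte, which is portable to
 * strict-alignment hosts.  The memcpy variant assumes a little-endian host,
 * just like the direct cast in the code above.
 */
#include <stdint.h>
#include <string.h>

static uint32_t sketchReadU32LittleEndian(const uint8_t *pb)
{
    return (uint32_t)pb[0]
         | ((uint32_t)pb[1] << 8)
         | ((uint32_t)pb[2] << 16)
         | ((uint32_t)pb[3] << 24);
}

static uint32_t sketchReadU32Unaligned(const uint8_t *pb)
{
    uint32_t u32;
    memcpy(&u32, pb, sizeof(u32)); /* well-defined way to do an unaligned access */
    return u32;
}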
1429
1430#else /* IEM_WITH_SETJMP */
1431
1432/**
1433 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1434 *
1435 * @returns The opcode word.
1436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1437 */
1438uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1439{
1440# ifdef IEM_WITH_CODE_TLB
1441 uint16_t u16;
1442 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1443 return u16;
1444# else
1445 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1446 if (rcStrict == VINF_SUCCESS)
1447 {
1448 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1449 pVCpu->iem.s.offOpcode += 2;
1450# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1451 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1452# else
1453 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1454# endif
1455 }
1456 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1457# endif
1458}
1459
1460#endif /* IEM_WITH_SETJMP */
1461
1462#ifndef IEM_WITH_SETJMP
1463
1464/**
1465 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1466 *
1467 * @returns Strict VBox status code.
1468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1469 * @param pu32 Where to return the opcode double word.
1470 */
1471VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1472{
1473 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1474 if (rcStrict == VINF_SUCCESS)
1475 {
1476 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1477 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1478 pVCpu->iem.s.offOpcode = offOpcode + 2;
1479 }
1480 else
1481 *pu32 = 0;
1482 return rcStrict;
1483}
1484
1485
1486/**
1487 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1488 *
1489 * @returns Strict VBox status code.
1490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1491 * @param pu64 Where to return the opcode quad word.
1492 */
1493VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1494{
1495 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1496 if (rcStrict == VINF_SUCCESS)
1497 {
1498 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1499 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1500 pVCpu->iem.s.offOpcode = offOpcode + 2;
1501 }
1502 else
1503 *pu64 = 0;
1504 return rcStrict;
1505}
1506
1507#endif /* !IEM_WITH_SETJMP */
1508
1509#ifndef IEM_WITH_SETJMP
1510
1511/**
1512 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1513 *
1514 * @returns Strict VBox status code.
1515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1516 * @param pu32 Where to return the opcode dword.
1517 */
1518VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1519{
1520 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1521 if (rcStrict == VINF_SUCCESS)
1522 {
1523 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1524# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1525 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1526# else
1527 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1528 pVCpu->iem.s.abOpcode[offOpcode + 1],
1529 pVCpu->iem.s.abOpcode[offOpcode + 2],
1530 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1531# endif
1532 pVCpu->iem.s.offOpcode = offOpcode + 4;
1533 }
1534 else
1535 *pu32 = 0;
1536 return rcStrict;
1537}
1538
1539#else /* IEM_WITH_SETJMP */
1540
1541/**
1542 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1543 *
1544 * @returns The opcode dword.
1545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1546 */
1547uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1548{
1549# ifdef IEM_WITH_CODE_TLB
1550 uint32_t u32;
1551 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1552 return u32;
1553# else
1554 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1555 if (rcStrict == VINF_SUCCESS)
1556 {
1557 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1558 pVCpu->iem.s.offOpcode = offOpcode + 4;
1559# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1560 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1561# else
1562 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1563 pVCpu->iem.s.abOpcode[offOpcode + 1],
1564 pVCpu->iem.s.abOpcode[offOpcode + 2],
1565 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1566# endif
1567 }
1568 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1569# endif
1570}
1571
1572#endif /* IEM_WITH_SETJMP */
1573
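/* Note (illustrative values only): the RT_MAKE_U32_FROM_U8 fallback in the U32
 * fetchers above assembles the immediate least significant byte first, so the
 * opcode bytes 78 56 34 12 yield the dword 0x12345678 - the same result the
 * unaligned read path produces on a little-endian host. */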
1574#ifndef IEM_WITH_SETJMP
1575
1576/**
1577 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1578 *
1579 * @returns Strict VBox status code.
1580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1581 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1582 */
1583VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1584{
1585 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1586 if (rcStrict == VINF_SUCCESS)
1587 {
1588 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1589 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1590 pVCpu->iem.s.abOpcode[offOpcode + 1],
1591 pVCpu->iem.s.abOpcode[offOpcode + 2],
1592 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1593 pVCpu->iem.s.offOpcode = offOpcode + 4;
1594 }
1595 else
1596 *pu64 = 0;
1597 return rcStrict;
1598}
1599
1600
1601/**
1602 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1603 *
1604 * @returns Strict VBox status code.
1605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1606 * @param pu64 Where to return the opcode dword, sign extended to a qword.
1607 */
1608VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1609{
1610 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1611 if (rcStrict == VINF_SUCCESS)
1612 {
1613 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1614 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1615 pVCpu->iem.s.abOpcode[offOpcode + 1],
1616 pVCpu->iem.s.abOpcode[offOpcode + 2],
1617 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1618 pVCpu->iem.s.offOpcode = offOpcode + 4;
1619 }
1620 else
1621 *pu64 = 0;
1622 return rcStrict;
1623}
1624
1625#endif /* !IEM_WITH_SETJMP */
1626
1627#ifndef IEM_WITH_SETJMP
1628
1629/**
1630 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1631 *
1632 * @returns Strict VBox status code.
1633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1634 * @param pu64 Where to return the opcode qword.
1635 */
1636VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1637{
1638 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1639 if (rcStrict == VINF_SUCCESS)
1640 {
1641 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1642# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1643 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1644# else
1645 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1646 pVCpu->iem.s.abOpcode[offOpcode + 1],
1647 pVCpu->iem.s.abOpcode[offOpcode + 2],
1648 pVCpu->iem.s.abOpcode[offOpcode + 3],
1649 pVCpu->iem.s.abOpcode[offOpcode + 4],
1650 pVCpu->iem.s.abOpcode[offOpcode + 5],
1651 pVCpu->iem.s.abOpcode[offOpcode + 6],
1652 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1653# endif
1654 pVCpu->iem.s.offOpcode = offOpcode + 8;
1655 }
1656 else
1657 *pu64 = 0;
1658 return rcStrict;
1659}
1660
1661#else /* IEM_WITH_SETJMP */
1662
1663/**
1664 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1665 *
1666 * @returns The opcode qword.
1667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1668 */
1669uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1670{
1671# ifdef IEM_WITH_CODE_TLB
1672 uint64_t u64;
1673 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1674 return u64;
1675# else
1676 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1677 if (rcStrict == VINF_SUCCESS)
1678 {
1679 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1680 pVCpu->iem.s.offOpcode = offOpcode + 8;
1681# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1682 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1683# else
1684 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1685 pVCpu->iem.s.abOpcode[offOpcode + 1],
1686 pVCpu->iem.s.abOpcode[offOpcode + 2],
1687 pVCpu->iem.s.abOpcode[offOpcode + 3],
1688 pVCpu->iem.s.abOpcode[offOpcode + 4],
1689 pVCpu->iem.s.abOpcode[offOpcode + 5],
1690 pVCpu->iem.s.abOpcode[offOpcode + 6],
1691 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1692# endif
1693 }
1694 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1695# endif
1696}
1697
1698#endif /* IEM_WITH_SETJMP */
1699
1700
1701
1702/** @name Misc Worker Functions.
1703 * @{
1704 */
1705
1706/**
1707 * Gets the exception class for the specified exception vector.
1708 *
1709 * @returns The class of the specified exception.
1710 * @param uVector The exception vector.
1711 */
1712static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1713{
1714 Assert(uVector <= X86_XCPT_LAST);
1715 switch (uVector)
1716 {
1717 case X86_XCPT_DE:
1718 case X86_XCPT_TS:
1719 case X86_XCPT_NP:
1720 case X86_XCPT_SS:
1721 case X86_XCPT_GP:
1722 case X86_XCPT_SX: /* AMD only */
1723 return IEMXCPTCLASS_CONTRIBUTORY;
1724
1725 case X86_XCPT_PF:
1726 case X86_XCPT_VE: /* Intel only */
1727 return IEMXCPTCLASS_PAGE_FAULT;
1728
1729 case X86_XCPT_DF:
1730 return IEMXCPTCLASS_DOUBLE_FAULT;
1731 }
1732 return IEMXCPTCLASS_BENIGN;
1733}
1734
1735
1736/**
1737 * Evaluates how to handle an exception caused during delivery of another event
1738 * (exception / interrupt).
1739 *
1740 * @returns How to handle the recursive exception.
1741 * @param pVCpu The cross context virtual CPU structure of the
1742 * calling thread.
1743 * @param fPrevFlags The flags of the previous event.
1744 * @param uPrevVector The vector of the previous event.
1745 * @param fCurFlags The flags of the current exception.
1746 * @param uCurVector The vector of the current exception.
1747 * @param pfXcptRaiseInfo Where to store additional information about the
1748 * exception condition. Optional.
1749 */
1750VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1751 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1752{
1753 /*
1754 * Only CPU exceptions can be raised while delivering other events; software interrupt
1755 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1756 */
1757 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1758 Assert(pVCpu); RT_NOREF(pVCpu);
1759 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1760
1761 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1762 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1763 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1764 {
1765 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1766 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1767 {
1768 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1769 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1770 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1771 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1772 {
1773 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1774 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1775 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1776 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1777 uCurVector, pVCpu->cpum.GstCtx.cr2));
1778 }
1779 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1780 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1781 {
1782 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1783 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1784 }
1785 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1786 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1787 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1788 {
1789 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1790 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1791 }
1792 }
1793 else
1794 {
1795 if (uPrevVector == X86_XCPT_NMI)
1796 {
1797 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1798 if (uCurVector == X86_XCPT_PF)
1799 {
1800 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1801 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1802 }
1803 }
1804 else if ( uPrevVector == X86_XCPT_AC
1805 && uCurVector == X86_XCPT_AC)
1806 {
1807 enmRaise = IEMXCPTRAISE_CPU_HANG;
1808 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1809 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1810 }
1811 }
1812 }
1813 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1814 {
1815 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1816 if (uCurVector == X86_XCPT_PF)
1817 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1818 }
1819 else
1820 {
1821 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1822 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1823 }
1824
1825 if (pfXcptRaiseInfo)
1826 *pfXcptRaiseInfo = fRaiseInfo;
1827 return enmRaise;
1828}
1829
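/* Usage sketch (illustrative; not one of the real call sites): a caller that
 * hit a #PF while delivering a #PF could ask the helper above what to do:
 *
 *      IEMXCPTRAISEINFO fRaiseInfo;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                       &fRaiseInfo);
 *
 * which, per the page-fault rules above, yields IEMXCPTRAISE_DOUBLE_FAULT with
 * IEMXCPTRAISEINFO_PF_PF set in fRaiseInfo. */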
1830
1831/**
1832 * Enters the CPU shutdown state initiated by a triple fault or other
1833 * unrecoverable conditions.
1834 *
1835 * @returns Strict VBox status code.
1836 * @param pVCpu The cross context virtual CPU structure of the
1837 * calling thread.
1838 */
1839static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1840{
1841 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1842 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1843
1844 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1845 {
1846 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1847 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1848 }
1849
1850 RT_NOREF(pVCpu);
1851 return VINF_EM_TRIPLE_FAULT;
1852}
1853
1854
1855/**
1856 * Validates a new SS segment.
1857 *
1858 * @returns VBox strict status code.
1859 * @param pVCpu The cross context virtual CPU structure of the
1860 * calling thread.
1861 * @param NewSS The new SS selector.
1862 * @param uCpl The CPL to load the stack for.
1863 * @param pDesc Where to return the descriptor.
1864 */
1865static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1866{
1867 /* Null selectors are not allowed (we're not called for dispatching
1868 interrupts with SS=0 in long mode). */
1869 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1870 {
1871 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1872 return iemRaiseTaskSwitchFault0(pVCpu);
1873 }
1874
1875 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1876 if ((NewSS & X86_SEL_RPL) != uCpl)
1877 {
1878 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1879 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1880 }
1881
1882 /*
1883 * Read the descriptor.
1884 */
1885 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1886 if (rcStrict != VINF_SUCCESS)
1887 return rcStrict;
1888
1889 /*
1890 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1891 */
1892 if (!pDesc->Legacy.Gen.u1DescType)
1893 {
1894 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1895 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1896 }
1897
1898 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1899 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1900 {
1901 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1902 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1903 }
1904 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1905 {
1906 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1907 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1908 }
1909
1910 /* Is it there? */
1911 /** @todo testcase: Is this checked before the canonical / limit check below? */
1912 if (!pDesc->Legacy.Gen.u1Present)
1913 {
1914 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1915 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1916 }
1917
1918 return VINF_SUCCESS;
1919}
1920
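/* Usage sketch (illustrative; the real callers live elsewhere): the helper is
 * meant to be used before committing anything to SS, along these lines:
 *
 *      IEMSELDESC   DescSS;
 *      VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uCpl, &DescSS);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    (propagates the #TS / #NP status prepared above)
 */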
1921/** @} */
1922
1923
1924/** @name Raising Exceptions.
1925 *
1926 * @{
1927 */
1928
1929
1930/**
1931 * Loads the specified stack far pointer from the TSS.
1932 *
1933 * @returns VBox strict status code.
1934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1935 * @param uCpl The CPL to load the stack for.
1936 * @param pSelSS Where to return the new stack segment.
1937 * @param puEsp Where to return the new stack pointer.
1938 */
1939static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1940{
1941 VBOXSTRICTRC rcStrict;
1942 Assert(uCpl < 4);
1943
1944 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1945 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1946 {
1947 /*
1948 * 16-bit TSS (X86TSS16).
1949 */
1950 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1951 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1952 {
1953 uint32_t off = uCpl * 4 + 2;
1954 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1955 {
1956 /** @todo check actual access pattern here. */
1957 uint32_t u32Tmp = 0; /* gcc maybe... */
1958 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1959 if (rcStrict == VINF_SUCCESS)
1960 {
1961 *puEsp = RT_LOWORD(u32Tmp);
1962 *pSelSS = RT_HIWORD(u32Tmp);
1963 return VINF_SUCCESS;
1964 }
1965 }
1966 else
1967 {
1968 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1969 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1970 }
1971 break;
1972 }
1973
1974 /*
1975 * 32-bit TSS (X86TSS32).
1976 */
1977 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1978 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1979 {
1980 uint32_t off = uCpl * 8 + 4;
1981 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1982 {
1983/** @todo check actual access pattern here. */
1984 uint64_t u64Tmp;
1985 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1986 if (rcStrict == VINF_SUCCESS)
1987 {
1988 *puEsp = u64Tmp & UINT32_MAX;
1989 *pSelSS = (RTSEL)(u64Tmp >> 32);
1990 return VINF_SUCCESS;
1991 }
1992 }
1993 else
1994 {
1995 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1996 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1997 }
1998 break;
1999 }
2000
2001 default:
2002 AssertFailed();
2003 rcStrict = VERR_IEM_IPE_4;
2004 break;
2005 }
2006
2007 *puEsp = 0; /* make gcc happy */
2008 *pSelSS = 0; /* make gcc happy */
2009 return rcStrict;
2010}
2011
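/* Worked example (derived from the offsets used above): for a 32-bit TSS and
 * uCpl=1 the code reads 8 bytes at TR.base + 1*8 + 4 = TR.base + 12, i.e. the
 * {esp1, ss1} pair; for a 16-bit TSS and uCpl=1 it reads 4 bytes at
 * TR.base + 1*4 + 2 = TR.base + 6, i.e. the {sp1, ss1} pair. */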
2012
2013/**
2014 * Loads the specified stack pointer from the 64-bit TSS.
2015 *
2016 * @returns VBox strict status code.
2017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2018 * @param uCpl The CPL to load the stack for.
2019 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2020 * @param puRsp Where to return the new stack pointer.
2021 */
2022static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2023{
2024 Assert(uCpl < 4);
2025 Assert(uIst < 8);
2026 *puRsp = 0; /* make gcc happy */
2027
2028 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2029 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2030
2031 uint32_t off;
2032 if (uIst)
2033 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2034 else
2035 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2036 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2037 {
2038 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2039 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2040 }
2041
2042 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2043}
2044
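/* Worked example (derived from the offsets used above): with uIst=3 the code
 * reads the 8-byte IST3 slot at TR.base + RT_UOFFSETOF(X86TSS64, ist1) + 2*8,
 * while uIst=0 with uCpl=2 reads RSP2 at TR.base + RT_UOFFSETOF(X86TSS64, rsp0) + 2*8. */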
2045
2046/**
2047 * Adjust the CPU state according to the exception being raised.
2048 *
2049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2050 * @param u8Vector The exception that has been raised.
2051 */
2052DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2053{
2054 switch (u8Vector)
2055 {
2056 case X86_XCPT_DB:
2057 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2058 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2059 break;
2060 /** @todo Read the AMD and Intel exception reference... */
2061 }
2062}
2063
2064
2065/**
2066 * Implements exceptions and interrupts for real mode.
2067 *
2068 * @returns VBox strict status code.
2069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2070 * @param cbInstr The number of bytes to offset rIP by in the return
2071 * address.
2072 * @param u8Vector The interrupt / exception vector number.
2073 * @param fFlags The flags.
2074 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2075 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2076 */
2077static VBOXSTRICTRC
2078iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2079 uint8_t cbInstr,
2080 uint8_t u8Vector,
2081 uint32_t fFlags,
2082 uint16_t uErr,
2083 uint64_t uCr2) RT_NOEXCEPT
2084{
2085 NOREF(uErr); NOREF(uCr2);
2086 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2087
2088 /*
2089 * Read the IDT entry.
2090 */
2091 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2092 {
2093 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2094 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2095 }
2096 RTFAR16 Idte;
2097 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2098 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2099 {
2100 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2101 return rcStrict;
2102 }
2103
2104 /*
2105 * Push the stack frame.
2106 */
2107 uint8_t bUnmapInfo;
2108 uint16_t *pu16Frame;
2109 uint64_t uNewRsp;
2110 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2111 if (rcStrict != VINF_SUCCESS)
2112 return rcStrict;
2113
2114 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2115#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2116 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2117 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2118 fEfl |= UINT16_C(0xf000);
2119#endif
2120 pu16Frame[2] = (uint16_t)fEfl;
2121 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2122 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2123 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2124 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2125 return rcStrict;
2126
2127 /*
2128 * Load the vector address into cs:ip and make exception specific state
2129 * adjustments.
2130 */
2131 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2132 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2133 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2134 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2135 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2136 pVCpu->cpum.GstCtx.rip = Idte.off;
2137 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2138 IEMMISC_SET_EFL(pVCpu, fEfl);
2139
2140 /** @todo do we actually do this in real mode? */
2141 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2142 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2143
2144 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2145 so best leave them alone in case we're in a weird kind of real mode... */
2146
2147 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2148}
2149
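/* For reference, the stack frame built above looks like this once SP has been
 * lowered by 6 bytes:
 *
 *      SS:[SP+4]   FLAGS   (with 0xf000 forced on 8086/V20/186 class CPUs)
 *      SS:[SP+2]   CS      (return selector)
 *      SS:[SP+0]   IP      (return offset; +cbInstr for software interrupts)
 *
 * after which CS:IP is loaded from the 4-byte IVT entry at vector*4. */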
2150
2151/**
2152 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2153 *
2154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2155 * @param pSReg Pointer to the segment register.
2156 */
2157DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2158{
2159 pSReg->Sel = 0;
2160 pSReg->ValidSel = 0;
2161 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2162 {
2163 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2164 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2165 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2166 }
2167 else
2168 {
2169 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2170 /** @todo check this on AMD-V */
2171 pSReg->u64Base = 0;
2172 pSReg->u32Limit = 0;
2173 }
2174}
2175
2176
2177/**
2178 * Loads a segment selector during a task switch in V8086 mode.
2179 *
2180 * @param pSReg Pointer to the segment register.
2181 * @param uSel The selector value to load.
2182 */
2183DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2184{
2185 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2186 pSReg->Sel = uSel;
2187 pSReg->ValidSel = uSel;
2188 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2189 pSReg->u64Base = uSel << 4;
2190 pSReg->u32Limit = 0xffff;
2191 pSReg->Attr.u = 0xf3;
2192}
2193
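/* Worked example (values for illustration only): uSel=0x1234 gives
 * base = 0x1234 << 4 = 0x12340, limit = 0xffff and attributes 0xf3 (present,
 * DPL=3, accessed read/write data), i.e. the real-mode style mapping V8086
 * mode expects. */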
2194
2195/**
2196 * Loads a segment selector during a task switch in protected mode.
2197 *
2198 * In this task switch scenario, we would throw \#TS exceptions rather than
2199 * \#GPs.
2200 *
2201 * @returns VBox strict status code.
2202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2203 * @param pSReg Pointer to the segment register.
2204 * @param uSel The new selector value.
2205 *
2206 * @remarks This does _not_ handle CS or SS.
2207 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2208 */
2209static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2210{
2211 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2212
2213 /* Null data selector. */
2214 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2215 {
2216 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2218 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2219 return VINF_SUCCESS;
2220 }
2221
2222 /* Fetch the descriptor. */
2223 IEMSELDESC Desc;
2224 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2225 if (rcStrict != VINF_SUCCESS)
2226 {
2227 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2228 VBOXSTRICTRC_VAL(rcStrict)));
2229 return rcStrict;
2230 }
2231
2232 /* Must be a data segment or readable code segment. */
2233 if ( !Desc.Legacy.Gen.u1DescType
2234 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2235 {
2236 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2237 Desc.Legacy.Gen.u4Type));
2238 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2239 }
2240
2241 /* Check privileges for data segments and non-conforming code segments. */
2242 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2243 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2244 {
2245 /* The RPL and the new CPL must be less than or equal to the DPL. */
2246 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2247 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2248 {
2249 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2250 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2251 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2252 }
2253 }
2254
2255 /* Is it there? */
2256 if (!Desc.Legacy.Gen.u1Present)
2257 {
2258 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2259 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2260 }
2261
2262 /* The base and limit. */
2263 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2264 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2265
2266 /*
2267 * Ok, everything checked out fine. Now set the accessed bit before
2268 * committing the result into the registers.
2269 */
2270 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2271 {
2272 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2273 if (rcStrict != VINF_SUCCESS)
2274 return rcStrict;
2275 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2276 }
2277
2278 /* Commit */
2279 pSReg->Sel = uSel;
2280 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2281 pSReg->u32Limit = cbLimit;
2282 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2283 pSReg->ValidSel = uSel;
2284 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2285 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2286 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2287
2288 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2289 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2290 return VINF_SUCCESS;
2291}
2292
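/* This helper is used by iemTaskSwitch below for the ES/DS/FS/GS reloads; CS
 * and SS get their own, stricter handling there. */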
2293
2294/**
2295 * Performs a task switch.
2296 *
2297 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2298 * caller is responsible for performing the necessary checks (like DPL, TSS
2299 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2300 * reference for JMP, CALL, IRET.
2301 *
2302 * If the task switch is due to a software interrupt or hardware exception,
2303 * the caller is responsible for validating the TSS selector and descriptor. See
2304 * Intel Instruction reference for INT n.
2305 *
2306 * @returns VBox strict status code.
2307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2308 * @param enmTaskSwitch The cause of the task switch.
2309 * @param uNextEip The EIP effective after the task switch.
2310 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2311 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2312 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2313 * @param SelTss The TSS selector of the new task.
2314 * @param pNewDescTss Pointer to the new TSS descriptor.
2315 */
2316VBOXSTRICTRC
2317iemTaskSwitch(PVMCPUCC pVCpu,
2318 IEMTASKSWITCH enmTaskSwitch,
2319 uint32_t uNextEip,
2320 uint32_t fFlags,
2321 uint16_t uErr,
2322 uint64_t uCr2,
2323 RTSEL SelTss,
2324 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2325{
2326 Assert(!IEM_IS_REAL_MODE(pVCpu));
2327 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2328 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2329
2330 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2331 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2332 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2333 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2334 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2335
2336 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2337 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2338
2339 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2340 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2341
2342 /* Update CR2 in case it's a page-fault. */
2343 /** @todo This should probably be done much earlier in IEM/PGM. See
2344 * @bugref{5653#c49}. */
2345 if (fFlags & IEM_XCPT_FLAGS_CR2)
2346 pVCpu->cpum.GstCtx.cr2 = uCr2;
2347
2348 /*
2349 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2350 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2351 */
2352 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2353 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2354 if (uNewTssLimit < uNewTssLimitMin)
2355 {
2356 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2357 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2358 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2359 }
2360
2361 /*
2362 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2363 * The new TSS must have been read and validated (DPL, limits etc.) before a
2364 * task-switch VM-exit commences.
2365 *
2366 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2367 */
2368 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2369 {
2370 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2371 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2372 }
2373
2374 /*
2375 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2376 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2377 */
2378 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2379 {
2380 uint32_t const uExitInfo1 = SelTss;
2381 uint32_t uExitInfo2 = uErr;
2382 switch (enmTaskSwitch)
2383 {
2384 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2385 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2386 default: break;
2387 }
2388 if (fFlags & IEM_XCPT_FLAGS_ERR)
2389 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2390 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2391 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2392
2393 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2394 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2395 RT_NOREF2(uExitInfo1, uExitInfo2);
2396 }
2397
2398 /*
2399 * Check the current TSS limit. The last written byte to the current TSS during the
2400 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2401 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2402 *
2403 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2404 * end up with smaller than "legal" TSS limits.
2405 */
2406 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2407 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2408 if (uCurTssLimit < uCurTssLimitMin)
2409 {
2410 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2411 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2412 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2413 }
2414
2415 /*
2416 * Verify that the new TSS can be accessed and map it. Map only the required contents
2417 * and not the entire TSS.
2418 */
2419 uint8_t bUnmapInfoNewTss;
2420 void *pvNewTss;
2421 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2422 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2423 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2424 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2425 * not perform correct translation if this happens. See Intel spec. 7.2.1
2426 * "Task-State Segment". */
2427 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2428/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2429 * Consider wrapping the remainder into a function for simpler cleanup. */
2430 if (rcStrict != VINF_SUCCESS)
2431 {
2432 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2433 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2434 return rcStrict;
2435 }
2436
2437 /*
2438 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2439 */
2440 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2441 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2442 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2443 {
2444 uint8_t bUnmapInfoDescCurTss;
2445 PX86DESC pDescCurTss;
2446 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2447 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2448 if (rcStrict != VINF_SUCCESS)
2449 {
2450 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2451 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2452 return rcStrict;
2453 }
2454
2455 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2456 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2457 if (rcStrict != VINF_SUCCESS)
2458 {
2459 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2460 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2461 return rcStrict;
2462 }
2463
2464 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2465 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2466 {
2467 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2468 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2469 fEFlags &= ~X86_EFL_NT;
2470 }
2471 }
2472
2473 /*
2474 * Save the CPU state into the current TSS.
2475 */
2476 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2477 if (GCPtrNewTss == GCPtrCurTss)
2478 {
2479 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2480 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2481 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2482 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2483 pVCpu->cpum.GstCtx.ldtr.Sel));
2484 }
2485 if (fIsNewTss386)
2486 {
2487 /*
2488 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2489 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2490 */
2491 uint8_t bUnmapInfoCurTss32;
2492 void *pvCurTss32;
2493 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2494 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2495 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2496 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2497 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2498 if (rcStrict != VINF_SUCCESS)
2499 {
2500 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2501 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2502 return rcStrict;
2503 }
2504
2505 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2506 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2507 pCurTss32->eip = uNextEip;
2508 pCurTss32->eflags = fEFlags;
2509 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2510 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2511 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2512 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2513 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2514 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2515 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2516 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2517 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2518 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2519 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2520 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2521 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2522 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2523
2524 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2525 if (rcStrict != VINF_SUCCESS)
2526 {
2527 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2528 VBOXSTRICTRC_VAL(rcStrict)));
2529 return rcStrict;
2530 }
2531 }
2532 else
2533 {
2534 /*
2535 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2536 */
2537 uint8_t bUnmapInfoCurTss16;
2538 void *pvCurTss16;
2539 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2540 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2541 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2542 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2543 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2544 if (rcStrict != VINF_SUCCESS)
2545 {
2546 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2547 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2548 return rcStrict;
2549 }
2550
2551 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2552 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2553 pCurTss16->ip = uNextEip;
2554 pCurTss16->flags = (uint16_t)fEFlags;
2555 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2556 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2557 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2558 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2559 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2560 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2561 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2562 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2563 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2564 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2565 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2566 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2567
2568 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2569 if (rcStrict != VINF_SUCCESS)
2570 {
2571 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2572 VBOXSTRICTRC_VAL(rcStrict)));
2573 return rcStrict;
2574 }
2575 }
2576
2577 /*
2578 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2579 */
2580 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2581 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2582 {
2583 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2584 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2585 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2586 }
2587
2588 /*
2589 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2590 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2591 */
2592 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2593 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2594 bool fNewDebugTrap;
2595 if (fIsNewTss386)
2596 {
2597 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2598 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2599 uNewEip = pNewTss32->eip;
2600 uNewEflags = pNewTss32->eflags;
2601 uNewEax = pNewTss32->eax;
2602 uNewEcx = pNewTss32->ecx;
2603 uNewEdx = pNewTss32->edx;
2604 uNewEbx = pNewTss32->ebx;
2605 uNewEsp = pNewTss32->esp;
2606 uNewEbp = pNewTss32->ebp;
2607 uNewEsi = pNewTss32->esi;
2608 uNewEdi = pNewTss32->edi;
2609 uNewES = pNewTss32->es;
2610 uNewCS = pNewTss32->cs;
2611 uNewSS = pNewTss32->ss;
2612 uNewDS = pNewTss32->ds;
2613 uNewFS = pNewTss32->fs;
2614 uNewGS = pNewTss32->gs;
2615 uNewLdt = pNewTss32->selLdt;
2616 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2617 }
2618 else
2619 {
2620 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2621 uNewCr3 = 0;
2622 uNewEip = pNewTss16->ip;
2623 uNewEflags = pNewTss16->flags;
2624 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2625 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2626 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2627 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2628 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2629 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2630 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2631 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2632 uNewES = pNewTss16->es;
2633 uNewCS = pNewTss16->cs;
2634 uNewSS = pNewTss16->ss;
2635 uNewDS = pNewTss16->ds;
2636 uNewFS = 0;
2637 uNewGS = 0;
2638 uNewLdt = pNewTss16->selLdt;
2639 fNewDebugTrap = false;
2640 }
2641
2642 if (GCPtrNewTss == GCPtrCurTss)
2643 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2644 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2645
2646 /*
2647 * We're done accessing the new TSS.
2648 */
2649 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2650 if (rcStrict != VINF_SUCCESS)
2651 {
2652 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2653 return rcStrict;
2654 }
2655
2656 /*
2657 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2658 */
2659 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2660 {
2661 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2662 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2663 if (rcStrict != VINF_SUCCESS)
2664 {
2665 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2666 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2667 return rcStrict;
2668 }
2669
2670 /* Check that the descriptor indicates the new TSS is available (not busy). */
2671 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2672 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2673 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2674
2675 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2676 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2677 if (rcStrict != VINF_SUCCESS)
2678 {
2679 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2680 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2681 return rcStrict;
2682 }
2683 }
2684
2685 /*
2686 * From this point on, we're technically in the new task. We will defer exceptions
2687 * until the completion of the task switch but before executing any instructions in the new task.
2688 */
2689 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2690 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2691 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2692 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2693 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2694 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2695 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2696
2697 /* Set the busy bit in TR. */
2698 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2699
2700 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2701 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2702 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2703 {
2704 uNewEflags |= X86_EFL_NT;
2705 }
2706
2707 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2708 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2709 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2710
2711 pVCpu->cpum.GstCtx.eip = uNewEip;
2712 pVCpu->cpum.GstCtx.eax = uNewEax;
2713 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2714 pVCpu->cpum.GstCtx.edx = uNewEdx;
2715 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2716 pVCpu->cpum.GstCtx.esp = uNewEsp;
2717 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2718 pVCpu->cpum.GstCtx.esi = uNewEsi;
2719 pVCpu->cpum.GstCtx.edi = uNewEdi;
2720
2721 uNewEflags &= X86_EFL_LIVE_MASK;
2722 uNewEflags |= X86_EFL_RA1_MASK;
2723 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2724
2725 /*
2726 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2727 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2728 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2729 */
2730 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2731 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2732
2733 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2734 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2735
2736 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2737 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2738
2739 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2740 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2741
2742 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2743 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2744
2745 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2746 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2747 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2748
2749 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2750 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2751 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2752 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2753
2754 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2755 {
2756 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2757 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2758 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2759 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2760 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2761 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2762 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2763 }
2764
2765 /*
2766 * Switch CR3 for the new task.
2767 */
2768 if ( fIsNewTss386
2769 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2770 {
2771 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2772 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2773 AssertRCSuccessReturn(rc, rc);
2774
2775 /* Inform PGM. */
2776 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2777 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2778 AssertRCReturn(rc, rc);
2779 /* ignore informational status codes */
2780
2781 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2782 }
2783
2784 /*
2785 * Switch LDTR for the new task.
2786 */
2787 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2788 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2789 else
2790 {
2791 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2792
2793 IEMSELDESC DescNewLdt;
2794 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2795 if (rcStrict != VINF_SUCCESS)
2796 {
2797 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2798 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2799 return rcStrict;
2800 }
2801 if ( !DescNewLdt.Legacy.Gen.u1Present
2802 || DescNewLdt.Legacy.Gen.u1DescType
2803 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2804 {
2805 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2806 uNewLdt, DescNewLdt.Legacy.u));
2807 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2808 }
2809
2810 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2811 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2812 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2813 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2814 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2815 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2816 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2817 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2818 }
2819
2820 IEMSELDESC DescSS;
2821 if (IEM_IS_V86_MODE(pVCpu))
2822 {
2823 IEM_SET_CPL(pVCpu, 3);
2824 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2825 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2826 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2827 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2828 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2829 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2830
2831 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2832 DescSS.Legacy.u = 0;
2833 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2834 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2835 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2836 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2837 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2838 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2839 DescSS.Legacy.Gen.u2Dpl = 3;
2840 }
2841 else
2842 {
2843 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2844
2845 /*
2846 * Load the stack segment for the new task.
2847 */
2848 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2849 {
2850 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2851 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2852 }
2853
2854 /* Fetch the descriptor. */
2855 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2856 if (rcStrict != VINF_SUCCESS)
2857 {
2858 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2859 VBOXSTRICTRC_VAL(rcStrict)));
2860 return rcStrict;
2861 }
2862
2863 /* SS must be a data segment and writable. */
2864 if ( !DescSS.Legacy.Gen.u1DescType
2865 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2866 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2867 {
2868 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2869 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2870 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2871 }
2872
2873 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2874 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2875 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2876 {
2877 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2878 uNewCpl));
2879 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2880 }
2881
2882 /* Is it there? */
2883 if (!DescSS.Legacy.Gen.u1Present)
2884 {
2885 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2886 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2887 }
2888
2889 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2890 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2891
2892 /* Set the accessed bit before committing the result into SS. */
2893 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2894 {
2895 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2896 if (rcStrict != VINF_SUCCESS)
2897 return rcStrict;
2898 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2899 }
2900
2901 /* Commit SS. */
2902 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2903 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2904 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2905 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2906 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2907 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2908 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2909
2910 /* CPL has changed, update IEM before loading rest of segments. */
2911 IEM_SET_CPL(pVCpu, uNewCpl);
2912
2913 /*
2914 * Load the data segments for the new task.
2915 */
2916 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2917 if (rcStrict != VINF_SUCCESS)
2918 return rcStrict;
2919 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2920 if (rcStrict != VINF_SUCCESS)
2921 return rcStrict;
2922 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2923 if (rcStrict != VINF_SUCCESS)
2924 return rcStrict;
2925 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2926 if (rcStrict != VINF_SUCCESS)
2927 return rcStrict;
2928
2929 /*
2930 * Load the code segment for the new task.
2931 */
2932 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2933 {
2934 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2935 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2936 }
2937
2938 /* Fetch the descriptor. */
2939 IEMSELDESC DescCS;
2940 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2941 if (rcStrict != VINF_SUCCESS)
2942 {
2943 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2944 return rcStrict;
2945 }
2946
2947 /* CS must be a code segment. */
2948 if ( !DescCS.Legacy.Gen.u1DescType
2949 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2950 {
2951 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2952 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2953 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2954 }
2955
2956 /* For conforming CS, DPL must be less than or equal to the RPL. */
2957 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2958 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2959 {
2960 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2961 DescCS.Legacy.Gen.u2Dpl));
2962 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2963 }
2964
2965 /* For non-conforming CS, DPL must match RPL. */
2966 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2967 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2968 {
2969 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2970 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2971 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2972 }
2973
2974 /* Is it there? */
2975 if (!DescCS.Legacy.Gen.u1Present)
2976 {
2977 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2978 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2979 }
2980
2981 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2982 u64Base = X86DESC_BASE(&DescCS.Legacy);
2983
2984 /* Set the accessed bit before committing the result into CS. */
2985 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2986 {
2987 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2988 if (rcStrict != VINF_SUCCESS)
2989 return rcStrict;
2990 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2991 }
2992
2993 /* Commit CS. */
2994 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2995 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2996 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2997 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2998 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2999 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3000 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3001 }
3002
3003 /* Make sure the CPU mode is correct. */
3004 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3005 if (fExecNew != pVCpu->iem.s.fExec)
3006 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3007 pVCpu->iem.s.fExec = fExecNew;
3008
3009 /** @todo Debug trap. */
3010 if (fIsNewTss386 && fNewDebugTrap)
3011 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3012
3013 /*
3014 * Construct the error code masks based on what caused this task switch.
3015 * See Intel Instruction reference for INT.
3016 */
3017 uint16_t uExt;
3018 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3019 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3020 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3021 uExt = 1;
3022 else
3023 uExt = 0;
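/* Note! uExt ends up as bit 0 (the EXT bit) of the #SS/#GP error codes raised
   further down: it is set for hardware interrupts, CPU exceptions and ICEBP,
   but not for INT n / INT3 / INTO. */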
3024
3025 /*
3026 * Push any error code on to the new stack.
3027 */
3028 if (fFlags & IEM_XCPT_FLAGS_ERR)
3029 {
3030 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3031 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
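/* A 386 TSS implies a 32-bit stack, so the error code is pushed as a dword;
   with a 286 TSS it is pushed as a word (see the pushes below). */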
3032 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3033
3034 /* Check that there is sufficient space on the stack. */
3035 /** @todo Factor out segment limit checking for normal/expand down segments
3036 * into a separate function. */
3037 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3038 {
3039 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3040 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3041 {
3042 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3043 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3044 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3045 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3046 }
3047 }
3048 else
3049 {
3050 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3051 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3052 {
3053 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3054 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3055 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3056 }
3057 }
3058
3059
3060 if (fIsNewTss386)
3061 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3062 else
3063 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3064 if (rcStrict != VINF_SUCCESS)
3065 {
3066 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3067 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3068 return rcStrict;
3069 }
3070 }
3071
3072 /* Check the new EIP against the new CS limit. */
3073 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3074 {
3075 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3076 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3077 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3078 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3079 }
3080
3081 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3082 pVCpu->cpum.GstCtx.ss.Sel));
3083 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3084}
3085
3086
3087/**
3088 * Implements exceptions and interrupts for protected mode.
3089 *
3090 * @returns VBox strict status code.
3091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3092 * @param cbInstr The number of bytes to offset rIP by in the return
3093 * address.
3094 * @param u8Vector The interrupt / exception vector number.
3095 * @param fFlags The flags.
3096 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3097 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3098 */
3099static VBOXSTRICTRC
3100iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3101 uint8_t cbInstr,
3102 uint8_t u8Vector,
3103 uint32_t fFlags,
3104 uint16_t uErr,
3105 uint64_t uCr2) RT_NOEXCEPT
3106{
3107 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3108
3109 /*
3110 * Read the IDT entry.
3111 */
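/* Note! Protected-mode IDT entries are 8 bytes each, so the limit must cover
   offset 8*u8Vector + 7, the last byte of the gate fetched below. */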
3112 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3113 {
3114 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3115 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3116 }
3117 X86DESC Idte;
3118 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3119 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3120 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3121 {
3122 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3123 return rcStrict;
3124 }
3125 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3126 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3127 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3128 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3129
3130 /*
3131 * Check the descriptor type, DPL and such.
3132 * ASSUMES this is done in the same order as described for call-gate calls.
3133 */
3134 if (Idte.Gate.u1DescType)
3135 {
3136 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3137 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3138 }
3139 bool fTaskGate = false;
3140 uint8_t f32BitGate = true;
3141 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3142 switch (Idte.Gate.u4Type)
3143 {
3144 case X86_SEL_TYPE_SYS_UNDEFINED:
3145 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3146 case X86_SEL_TYPE_SYS_LDT:
3147 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3148 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3149 case X86_SEL_TYPE_SYS_UNDEFINED2:
3150 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3151 case X86_SEL_TYPE_SYS_UNDEFINED3:
3152 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3153 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3154 case X86_SEL_TYPE_SYS_UNDEFINED4:
3155 {
3156 /** @todo check what actually happens when the type is wrong...
3157 * esp. call gates. */
3158 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3159 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3160 }
3161
3162 case X86_SEL_TYPE_SYS_286_INT_GATE:
3163 f32BitGate = false;
3164 RT_FALL_THRU();
3165 case X86_SEL_TYPE_SYS_386_INT_GATE:
3166 fEflToClear |= X86_EFL_IF;
3167 break;
3168
3169 case X86_SEL_TYPE_SYS_TASK_GATE:
3170 fTaskGate = true;
3171#ifndef IEM_IMPLEMENTS_TASKSWITCH
3172 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3173#endif
3174 break;
3175
3176 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3177 f32BitGate = false;
RT_FALL_THRU();
3178 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3179 break;
3180
3181 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3182 }
3183
3184 /* Check DPL against CPL if applicable. */
3185 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3186 {
3187 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3188 {
3189 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3190 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3191 }
3192 }
3193
3194 /* Is it there? */
3195 if (!Idte.Gate.u1Present)
3196 {
3197 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3198 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3199 }
3200
3201 /* Is it a task-gate? */
3202 if (fTaskGate)
3203 {
3204 /*
3205 * Construct the error code masks based on what caused this task switch.
3206 * See Intel Instruction reference for INT.
3207 */
3208 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3209 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3210 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3211 RTSEL SelTss = Idte.Gate.u16Sel;
3212
3213 /*
3214 * Fetch the TSS descriptor in the GDT.
3215 */
3216 IEMSELDESC DescTSS;
3217 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3218 if (rcStrict != VINF_SUCCESS)
3219 {
3220 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3221 VBOXSTRICTRC_VAL(rcStrict)));
3222 return rcStrict;
3223 }
3224
3225 /* The TSS descriptor must be a system segment and be available (not busy). */
3226 if ( DescTSS.Legacy.Gen.u1DescType
3227 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3228 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3229 {
3230 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3231 u8Vector, SelTss, DescTSS.Legacy.au64));
3232 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3233 }
3234
3235 /* The TSS must be present. */
3236 if (!DescTSS.Legacy.Gen.u1Present)
3237 {
3238 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3239 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3240 }
3241
3242 /* Do the actual task switch. */
3243 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3244 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3245 fFlags, uErr, uCr2, SelTss, &DescTSS);
3246 }
3247
3248 /* A null CS is bad. */
3249 RTSEL NewCS = Idte.Gate.u16Sel;
3250 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3251 {
3252 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3253 return iemRaiseGeneralProtectionFault0(pVCpu);
3254 }
3255
3256 /* Fetch the descriptor for the new CS. */
3257 IEMSELDESC DescCS;
3258 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3259 if (rcStrict != VINF_SUCCESS)
3260 {
3261 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3262 return rcStrict;
3263 }
3264
3265 /* Must be a code segment. */
3266 if (!DescCS.Legacy.Gen.u1DescType)
3267 {
3268 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3269 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3270 }
3271 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3272 {
3273 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3274 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3275 }
3276
3277 /* Don't allow lowering the privilege level. */
3278 /** @todo Does the lowering of privileges apply to software interrupts
3279 * only? This has a bearing on the more-privileged or
3280 * same-privilege stack behavior further down. A testcase would
3281 * be nice. */
3282 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3283 {
3284 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3285 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3286 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3287 }
3288
3289 /* Make sure the selector is present. */
3290 if (!DescCS.Legacy.Gen.u1Present)
3291 {
3292 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3293 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3294 }
3295
3296#ifdef LOG_ENABLED
3297 /* If software interrupt, try decode it if logging is enabled and such. */
3298 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3299 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3300 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3301#endif
3302
3303 /* Check the new EIP against the new CS limit. */
3304 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3305 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3306 ? Idte.Gate.u16OffsetLow
3307 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3308 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3309 if (uNewEip > cbLimitCS)
3310 {
3311 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3312 u8Vector, uNewEip, cbLimitCS, NewCS));
3313 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3314 }
3315 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3316
3317 /* Calc the flag image to push. */
3318 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3319 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3320 fEfl &= ~X86_EFL_RF;
3321 else
3322 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3323
3324 /* From V8086 mode only go to CPL 0. */
3325 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3326 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3327 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3328 {
3329 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3330 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3331 }
3332
3333 /*
3334 * If the privilege level changes, we need to get a new stack from the TSS.
3335 * This in turns means validating the new SS and ESP...
3336 */
3337 if (uNewCpl != IEM_GET_CPL(pVCpu))
3338 {
3339 RTSEL NewSS;
3340 uint32_t uNewEsp;
3341 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3342 if (rcStrict != VINF_SUCCESS)
3343 return rcStrict;
3344
3345 IEMSELDESC DescSS;
3346 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3347 if (rcStrict != VINF_SUCCESS)
3348 return rcStrict;
3349 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3350 if (!DescSS.Legacy.Gen.u1DefBig)
3351 {
3352 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3353 uNewEsp = (uint16_t)uNewEsp;
3354 }
3355
3356 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3357
3358 /* Check that there is sufficient space for the stack frame. */
3359 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
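/* The frame is 5 words (EIP, CS, EFLAGS, ESP, SS), or 9 words when leaving
   V8086 mode (ES, DS, FS and GS are pushed as well), plus an optional error
   code word; '<< f32BitGate' doubles it to dwords for 32-bit gates. */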
3360 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3361 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3362 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3363
3364 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3365 {
3366 if ( uNewEsp - 1 > cbLimitSS
3367 || uNewEsp < cbStackFrame)
3368 {
3369 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3370 u8Vector, NewSS, uNewEsp, cbStackFrame));
3371 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3372 }
3373 }
3374 else
3375 {
3376 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3377 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3378 {
3379 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3380 u8Vector, NewSS, uNewEsp, cbStackFrame));
3381 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3382 }
3383 }
3384
3385 /*
3386 * Start making changes.
3387 */
3388
3389 /* Set the new CPL so that stack accesses use it. */
3390 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3391 IEM_SET_CPL(pVCpu, uNewCpl);
3392
3393 /* Create the stack frame. */
3394 uint8_t bUnmapInfoStackFrame;
3395 RTPTRUNION uStackFrame;
3396 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3397 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3398 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3399 if (rcStrict != VINF_SUCCESS)
3400 return rcStrict;
3401 if (f32BitGate)
3402 {
3403 if (fFlags & IEM_XCPT_FLAGS_ERR)
3404 *uStackFrame.pu32++ = uErr;
3405 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3406 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3407 uStackFrame.pu32[2] = fEfl;
3408 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3409 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3410 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3411 if (fEfl & X86_EFL_VM)
3412 {
3413 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3414 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3415 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3416 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3417 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3418 }
3419 }
3420 else
3421 {
3422 if (fFlags & IEM_XCPT_FLAGS_ERR)
3423 *uStackFrame.pu16++ = uErr;
3424 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3425 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3426 uStackFrame.pu16[2] = fEfl;
3427 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3428 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3429 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3430 if (fEfl & X86_EFL_VM)
3431 {
3432 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3433 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3434 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3435 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3436 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3437 }
3438 }
3439 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3440 if (rcStrict != VINF_SUCCESS)
3441 return rcStrict;
3442
3443 /* Mark the selectors 'accessed' (hope this is the correct time). */
3444 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3445 * after pushing the stack frame? (Write protect the gdt + stack to
3446 * find out.) */
3447 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3448 {
3449 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3450 if (rcStrict != VINF_SUCCESS)
3451 return rcStrict;
3452 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3453 }
3454
3455 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3456 {
3457 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3458 if (rcStrict != VINF_SUCCESS)
3459 return rcStrict;
3460 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3461 }
3462
3463 /*
3464 * Start committing the register changes (joins with the DPL=CPL branch).
3465 */
3466 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3467 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3468 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3469 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3470 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3471 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3472 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3473 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3474 * SP is loaded).
3475 * Need to check the other combinations too:
3476 * - 16-bit TSS, 32-bit handler
3477 * - 32-bit TSS, 16-bit handler */
3478 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3479 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3480 else
3481 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3482
3483 if (fEfl & X86_EFL_VM)
3484 {
3485 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3486 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3487 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3488 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3489 }
3490 }
3491 /*
3492 * Same privilege, no stack change and smaller stack frame.
3493 */
3494 else
3495 {
3496 uint64_t uNewRsp;
3497 uint8_t bUnmapInfoStackFrame;
3498 RTPTRUNION uStackFrame;
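/* Same-privilege frame: 3 words (EIP, CS, EFLAGS) plus an optional error code
   word, doubled to dwords for 32-bit gates. */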
3499 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3500 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3501 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3502 if (rcStrict != VINF_SUCCESS)
3503 return rcStrict;
3504
3505 if (f32BitGate)
3506 {
3507 if (fFlags & IEM_XCPT_FLAGS_ERR)
3508 *uStackFrame.pu32++ = uErr;
3509 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3510 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3511 uStackFrame.pu32[2] = fEfl;
3512 }
3513 else
3514 {
3515 if (fFlags & IEM_XCPT_FLAGS_ERR)
3516 *uStackFrame.pu16++ = uErr;
3517 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3518 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3519 uStackFrame.pu16[2] = fEfl;
3520 }
3521 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3522 if (rcStrict != VINF_SUCCESS)
3523 return rcStrict;
3524
3525 /* Mark the CS selector as 'accessed'. */
3526 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3527 {
3528 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3529 if (rcStrict != VINF_SUCCESS)
3530 return rcStrict;
3531 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3532 }
3533
3534 /*
3535 * Start committing the register changes (joins with the other branch).
3536 */
3537 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3538 }
3539
3540 /* ... register committing continues. */
3541 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3542 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3543 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3544 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3545 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3546 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3547
3548 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3549 fEfl &= ~fEflToClear;
3550 IEMMISC_SET_EFL(pVCpu, fEfl);
3551
3552 if (fFlags & IEM_XCPT_FLAGS_CR2)
3553 pVCpu->cpum.GstCtx.cr2 = uCr2;
3554
3555 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3556 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3557
3558 /* Make sure the execution flags are correct. */
3559 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3560 if (fExecNew != pVCpu->iem.s.fExec)
3561 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3562 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3563 pVCpu->iem.s.fExec = fExecNew;
3564 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3565
3566 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3567}
3568
3569
3570/**
3571 * Implements exceptions and interrupts for long mode.
3572 *
3573 * @returns VBox strict status code.
3574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3575 * @param cbInstr The number of bytes to offset rIP by in the return
3576 * address.
3577 * @param u8Vector The interrupt / exception vector number.
3578 * @param fFlags The flags.
3579 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3580 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3581 */
3582static VBOXSTRICTRC
3583iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3584 uint8_t cbInstr,
3585 uint8_t u8Vector,
3586 uint32_t fFlags,
3587 uint16_t uErr,
3588 uint64_t uCr2) RT_NOEXCEPT
3589{
3590 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3591
3592 /*
3593 * Read the IDT entry.
3594 */
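/* Note! Long-mode IDT gates are 16 bytes each, hence the 'vector * 16' offset;
   the descriptor is fetched as two qwords below. */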
3595 uint16_t offIdt = (uint16_t)u8Vector << 4;
3596 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3597 {
3598 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3599 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3600 }
3601 X86DESC64 Idte;
3602#ifdef _MSC_VER /* Shut up silly compiler warning. */
3603 Idte.au64[0] = 0;
3604 Idte.au64[1] = 0;
3605#endif
3606 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3607 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3608 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3609 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3610 {
3611 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3612 return rcStrict;
3613 }
3614 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3615 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3616 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3617
3618 /*
3619 * Check the descriptor type, DPL and such.
3620 * ASSUMES this is done in the same order as described for call-gate calls.
3621 */
3622 if (Idte.Gate.u1DescType)
3623 {
3624 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3625 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3626 }
3627 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3628 switch (Idte.Gate.u4Type)
3629 {
3630 case AMD64_SEL_TYPE_SYS_INT_GATE:
3631 fEflToClear |= X86_EFL_IF;
3632 break;
3633 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3634 break;
3635
3636 default:
3637 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3638 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3639 }
3640
3641 /* Check DPL against CPL if applicable. */
3642 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3643 {
3644 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3645 {
3646 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3647 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3648 }
3649 }
3650
3651 /* Is it there? */
3652 if (!Idte.Gate.u1Present)
3653 {
3654 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3655 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3656 }
3657
3658 /* A null CS is bad. */
3659 RTSEL NewCS = Idte.Gate.u16Sel;
3660 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3661 {
3662 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3663 return iemRaiseGeneralProtectionFault0(pVCpu);
3664 }
3665
3666 /* Fetch the descriptor for the new CS. */
3667 IEMSELDESC DescCS;
3668 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3669 if (rcStrict != VINF_SUCCESS)
3670 {
3671 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3672 return rcStrict;
3673 }
3674
3675 /* Must be a 64-bit code segment. */
3676 if (!DescCS.Long.Gen.u1DescType)
3677 {
3678 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3679 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3680 }
3681 if ( !DescCS.Long.Gen.u1Long
3682 || DescCS.Long.Gen.u1DefBig
3683 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3684 {
3685 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3686 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3687 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3688 }
3689
3690 /* Don't allow lowering the privilege level. For non-conforming CS
3691 selectors, the CS.DPL sets the privilege level the trap/interrupt
3692 handler runs at. For conforming CS selectors, the CPL remains
3693 unchanged, but the CS.DPL must be <= CPL. */
3694 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3695 * when CPU in Ring-0. Result \#GP? */
3696 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3697 {
3698 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3699 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3700 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3701 }
3702
3703
3704 /* Make sure the selector is present. */
3705 if (!DescCS.Legacy.Gen.u1Present)
3706 {
3707 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3708 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3709 }
3710
3711 /* Check that the new RIP is canonical. */
3712 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3713 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3714 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3715 if (!IEM_IS_CANONICAL(uNewRip))
3716 {
3717 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3718 return iemRaiseGeneralProtectionFault0(pVCpu);
3719 }
3720
3721 /*
3722 * If the privilege level changes or if the IST isn't zero, we need to get
3723 * a new stack from the TSS.
3724 */
3725 uint64_t uNewRsp;
3726 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3727 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3728 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3729 || Idte.Gate.u3IST != 0)
3730 {
3731 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3732 if (rcStrict != VINF_SUCCESS)
3733 return rcStrict;
3734 }
3735 else
3736 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3737 uNewRsp &= ~(uint64_t)0xf;
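/* Note! In 64-bit mode the CPU aligns the new RSP down to a 16-byte boundary
   before pushing the stack frame (the masking above), regardless of whether
   an IST or CPL stack switch took place. */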
3738
3739 /*
3740 * Calc the flag image to push.
3741 */
3742 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3743 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3744 fEfl &= ~X86_EFL_RF;
3745 else
3746 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3747
3748 /*
3749 * Start making changes.
3750 */
3751 /* Set the new CPL so that stack accesses use it. */
3752 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3753 IEM_SET_CPL(pVCpu, uNewCpl);
3754/** @todo Setting CPL this early seems wrong as it would affect any errors we
3755 * raise accessing the stack and (?) GDT/LDT... */
3756
3757 /* Create the stack frame. */
3758 uint8_t bUnmapInfoStackFrame;
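/* The 64-bit frame is 5 qwords (RIP, CS, RFLAGS, RSP, SS) plus an optional
   error code qword. */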
3759 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3760 RTPTRUNION uStackFrame;
3761 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3762 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3763 if (rcStrict != VINF_SUCCESS)
3764 return rcStrict;
3765
3766 if (fFlags & IEM_XCPT_FLAGS_ERR)
3767 *uStackFrame.pu64++ = uErr;
3768 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3769 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3770 uStackFrame.pu64[2] = fEfl;
3771 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3772 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3773 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3774 if (rcStrict != VINF_SUCCESS)
3775 return rcStrict;
3776
3777 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3778 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3779 * after pushing the stack frame? (Write protect the gdt + stack to
3780 * find out.) */
3781 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3782 {
3783 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3784 if (rcStrict != VINF_SUCCESS)
3785 return rcStrict;
3786 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3787 }
3788
3789 /*
3790 * Start committing the register changes.
3791 */
3792 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3793 * hidden registers when interrupting 32-bit or 16-bit code! */
3794 if (uNewCpl != uOldCpl)
3795 {
3796 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3797 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3798 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3799 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3800 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3801 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3802 }
3803 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3804 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3805 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3806 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3807 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3808 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3809 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3810 pVCpu->cpum.GstCtx.rip = uNewRip;
3811
3812 fEfl &= ~fEflToClear;
3813 IEMMISC_SET_EFL(pVCpu, fEfl);
3814
3815 if (fFlags & IEM_XCPT_FLAGS_CR2)
3816 pVCpu->cpum.GstCtx.cr2 = uCr2;
3817
3818 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3819 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3820
3821 iemRecalcExecModeAndCplFlags(pVCpu);
3822
3823 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3824}
3825
3826
3827/**
3828 * Implements exceptions and interrupts.
3829 *
3830 * All exceptions and interrupts go thru this function!
3831 *
3832 * @returns VBox strict status code.
3833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3834 * @param cbInstr The number of bytes to offset rIP by in the return
3835 * address.
3836 * @param u8Vector The interrupt / exception vector number.
3837 * @param fFlags The flags.
3838 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3839 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
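 * @remarks The iemRaiseXxxx convenience workers further down are thin wrappers
 * around this function; see e.g. iemRaiseDivideError() and
 * iemRaiseGeneralProtectionFault0().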
3840 */
3841VBOXSTRICTRC
3842iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3843 uint8_t cbInstr,
3844 uint8_t u8Vector,
3845 uint32_t fFlags,
3846 uint16_t uErr,
3847 uint64_t uCr2) RT_NOEXCEPT
3848{
3849 /*
3850 * Get all the state that we might need here.
3851 */
3852 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3853 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3854
3855#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3856 /*
3857 * Flush prefetch buffer
3858 */
3859 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3860#endif
3861
3862 /*
3863 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3864 */
3865 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3866 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3867 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3868 | IEM_XCPT_FLAGS_BP_INSTR
3869 | IEM_XCPT_FLAGS_ICEBP_INSTR
3870 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3871 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3872 {
3873 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3874 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3875 u8Vector = X86_XCPT_GP;
3876 uErr = 0;
3877 }
3878
3879 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3880#ifdef DBGFTRACE_ENABLED
3881 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3882 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3883 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3884#endif
3885
3886 /*
3887 * Check if DBGF wants to intercept the exception.
3888 */
3889 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3890 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3891 { /* likely */ }
3892 else
3893 {
3894 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3895 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3896 if (rcStrict != VINF_SUCCESS)
3897 return rcStrict;
3898 }
3899
3900 /*
3901 * Evaluate whether NMI blocking should be in effect.
3902 * Normally, NMI blocking is in effect whenever we inject an NMI.
3903 */
3904 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3905 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3906
3907#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3908 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3909 {
3910 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3911 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3912 return rcStrict0;
3913
3914 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3915 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3916 {
3917 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3918 fBlockNmi = false;
3919 }
3920 }
3921#endif
3922
3923#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3924 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3925 {
3926 /*
3927 * If the event is being injected as part of VMRUN, it isn't subject to event
3928 * intercepts in the nested-guest. However, secondary exceptions that occur
3929 * during injection of any event -are- subject to exception intercepts.
3930 *
3931 * See AMD spec. 15.20 "Event Injection".
3932 */
3933 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3934 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3935 else
3936 {
3937 /*
3938 * Check and handle if the event being raised is intercepted.
3939 */
3940 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3941 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3942 return rcStrict0;
3943 }
3944 }
3945#endif
3946
3947 /*
3948 * Set NMI blocking if necessary.
3949 */
3950 if (fBlockNmi)
3951 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3952
3953 /*
3954 * Do recursion accounting.
3955 */
3956 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3957 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3958 if (pVCpu->iem.s.cXcptRecursions == 0)
3959 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3960 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3961 else
3962 {
3963 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3964 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3965 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3966
3967 if (pVCpu->iem.s.cXcptRecursions >= 4)
3968 {
3969#ifdef DEBUG_bird
3970 AssertFailed();
3971#endif
3972 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3973 }
3974
3975 /*
3976 * Evaluate the sequence of recurring events.
3977 */
3978 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3979 NULL /* pXcptRaiseInfo */);
3980 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3981 { /* likely */ }
3982 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3983 {
3984 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3985 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3986 u8Vector = X86_XCPT_DF;
3987 uErr = 0;
3988#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3989 /* VMX nested-guest #DF intercept needs to be checked here. */
3990 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3991 {
3992 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3993 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3994 return rcStrict0;
3995 }
3996#endif
3997 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3998 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3999 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4000 }
4001 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4002 {
4003 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4004 return iemInitiateCpuShutdown(pVCpu);
4005 }
4006 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4007 {
4008 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4009 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4010 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4011 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4012 return VERR_EM_GUEST_CPU_HANG;
4013 }
4014 else
4015 {
4016 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4017 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4018 return VERR_IEM_IPE_9;
4019 }
4020
4021 /*
4022 * The 'EXT' bit is set when an exception occurs during delivery of an external
4023 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4024 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
4025 * INT n, INT3 and INTO software interrupt instructions, the 'EXT' bit will not be set[3].
4026 *
4027 * [1] - Intel spec. 6.13 "Error Code"
4028 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4029 * [3] - Intel Instruction reference for INT n.
4030 */
4031 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4032 && (fFlags & IEM_XCPT_FLAGS_ERR)
4033 && u8Vector != X86_XCPT_PF
4034 && u8Vector != X86_XCPT_DF)
4035 {
4036 uErr |= X86_TRAP_ERR_EXTERNAL;
4037 }
4038 }
4039
4040 pVCpu->iem.s.cXcptRecursions++;
4041 pVCpu->iem.s.uCurXcpt = u8Vector;
4042 pVCpu->iem.s.fCurXcpt = fFlags;
4043 pVCpu->iem.s.uCurXcptErr = uErr;
4044 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4045
4046 /*
4047 * Extensive logging.
4048 */
4049#if defined(LOG_ENABLED) && defined(IN_RING3)
4050 if (LogIs3Enabled())
4051 {
4052 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4053 char szRegs[4096];
4054 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4055 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4056 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4057 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4058 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4059 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4060 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4061 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4062 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4063 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4064 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4065 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4066 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4067 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4068 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4069 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4070 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4071 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4072 " efer=%016VR{efer}\n"
4073 " pat=%016VR{pat}\n"
4074 " sf_mask=%016VR{sf_mask}\n"
4075 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4076 " lstar=%016VR{lstar}\n"
4077 " star=%016VR{star} cstar=%016VR{cstar}\n"
4078 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4079 );
4080
4081 char szInstr[256];
4082 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4083 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4084 szInstr, sizeof(szInstr), NULL);
4085 Log3(("%s%s\n", szRegs, szInstr));
4086 }
4087#endif /* LOG_ENABLED */
4088
4089 /*
4090 * Stats.
4091 */
4092 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4093 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4094 else if (u8Vector <= X86_XCPT_LAST)
4095 {
4096 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4097 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4098 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4099 }
4100
4101 /*
4102 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4103 * to ensure that a stale TLB or paging cache entry will only cause one
4104 * spurious #PF.
4105 */
4106 if ( u8Vector == X86_XCPT_PF
4107 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4108 IEMTlbInvalidatePage(pVCpu, uCr2);
4109
4110 /*
4111 * Call the mode specific worker function.
4112 */
4113 VBOXSTRICTRC rcStrict;
4114 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4115 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4116 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4117 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4118 else
4119 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4120
4121 /* Flush the prefetch buffer. */
4122 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4123
4124 /*
4125 * Unwind.
4126 */
4127 pVCpu->iem.s.cXcptRecursions--;
4128 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4129 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4130 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4131 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4132 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4133 return rcStrict;
4134}
4135
4136#ifdef IEM_WITH_SETJMP
4137/**
4138 * See iemRaiseXcptOrInt. Will not return.
4139 */
4140DECL_NO_RETURN(void)
4141iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4142 uint8_t cbInstr,
4143 uint8_t u8Vector,
4144 uint32_t fFlags,
4145 uint16_t uErr,
4146 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4147{
4148 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4149 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4150}
4151#endif
4152
4153
4154/** \#DE - 00. */
4155VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4156{
4157 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4158}
4159
4160
4161/** \#DB - 01.
4162 * @note This automatically clears DR7.GD. */
4163VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4164{
4165 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4166 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4167 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4168}
4169
4170
4171/** \#BR - 05. */
4172VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4173{
4174 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4175}
4176
4177
4178/** \#UD - 06. */
4179VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4180{
4181 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4182}
4183
4184
4185/** \#NM - 07. */
4186VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4187{
4188 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4189}
4190
4191
4192/** \#TS(err) - 0a. */
4193VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4194{
4195 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4196}
4197
4198
4199/** \#TS(tr) - 0a. */
4200VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4201{
4202 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4203 pVCpu->cpum.GstCtx.tr.Sel, 0);
4204}
4205
4206
4207/** \#TS(0) - 0a. */
4208VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4209{
4210 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4211 0, 0);
4212}
4213
4214
4215/** \#TS(err) - 0a. */
4216VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4217{
4218 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4219 uSel & X86_SEL_MASK_OFF_RPL, 0);
4220}
4221
4222
4223/** \#NP(err) - 0b. */
4224VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4225{
4226 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4227}
4228
4229
4230/** \#NP(sel) - 0b. */
4231VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4232{
4233 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4234 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4235 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4236 uSel & ~X86_SEL_RPL, 0);
4237}
4238
4239
4240/** \#SS(seg) - 0c. */
4241VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4242{
4243 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4244 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4245 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4246 uSel & ~X86_SEL_RPL, 0);
4247}
4248
4249
4250/** \#SS(err) - 0c. */
4251VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4252{
4253 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4254 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4255 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4256}
4257
4258
4259/** \#GP(n) - 0d. */
4260VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4261{
4262 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4263 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4264}
4265
4266
4267/** \#GP(0) - 0d. */
4268VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4269{
4270 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4271 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4272}
4273
4274#ifdef IEM_WITH_SETJMP
4275/** \#GP(0) - 0d. */
4276DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4277{
4278 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4279 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4280}
4281#endif
4282
4283
4284/** \#GP(sel) - 0d. */
4285VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4286{
4287 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4288 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4289 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4290 Sel & ~X86_SEL_RPL, 0);
4291}
4292
4293
4294/** \#GP(0) - 0d. */
4295VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4296{
4297 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4298 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4299}
4300
4301
4302/** \#GP(sel) - 0d. */
4303VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4304{
4305 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4306 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4307 NOREF(iSegReg); NOREF(fAccess);
4308 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4309 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4310}
4311
4312#ifdef IEM_WITH_SETJMP
4313/** \#GP(sel) - 0d, longjmp. */
4314DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4315{
4316 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4317 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4318 NOREF(iSegReg); NOREF(fAccess);
4319 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4320 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4321}
4322#endif
4323
4324/** \#GP(sel) - 0d. */
4325VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4326{
4327 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4328 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4329 NOREF(Sel);
4330 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4331}
4332
4333#ifdef IEM_WITH_SETJMP
4334/** \#GP(sel) - 0d, longjmp. */
4335DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4336{
4337 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4338 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4339 NOREF(Sel);
4340 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4341}
4342#endif
4343
4344
4345/** \#GP(sel) - 0d. */
4346VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4347{
4348 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4349 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4350 NOREF(iSegReg); NOREF(fAccess);
4351 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4352}
4353
4354#ifdef IEM_WITH_SETJMP
4355/** \#GP(sel) - 0d, longjmp. */
4356DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4357{
4358 NOREF(iSegReg); NOREF(fAccess);
4359 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4360}
4361#endif
4362
4363
4364/** \#PF(n) - 0e. */
4365VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4366{
4367 uint16_t uErr;
4368 switch (rc)
4369 {
4370 case VERR_PAGE_NOT_PRESENT:
4371 case VERR_PAGE_TABLE_NOT_PRESENT:
4372 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4373 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4374 uErr = 0;
4375 break;
4376
4377 default:
4378 AssertMsgFailed(("%Rrc\n", rc));
4379 RT_FALL_THRU();
4380 case VERR_ACCESS_DENIED:
4381 uErr = X86_TRAP_PF_P;
4382 break;
4383
4384 /** @todo reserved */
4385 }
4386
4387 if (IEM_GET_CPL(pVCpu) == 3)
4388 uErr |= X86_TRAP_PF_US;
4389
4390 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4391 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4392 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4393 uErr |= X86_TRAP_PF_ID;
4394
4395#if 0 /* This is so much non-sense, really. Why was it done like that? */
4396 /* Note! RW access callers reporting a WRITE protection fault will clear
4397 the READ flag before calling. So, read-modify-write accesses (RW)
4398 can safely be reported as READ faults. */
4399 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4400 uErr |= X86_TRAP_PF_RW;
4401#else
4402 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4403 {
4404 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4405 /// (regardless of outcome of the comparison in the latter case).
4406 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4407 uErr |= X86_TRAP_PF_RW;
4408 }
4409#endif
4410
4411 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4412 of the memory operand rather than at the start of it. (Not sure what
4413 happens if it crosses a page boundary.) The current heuristic for
4414 this is to report the #PF for the last byte if the access is more than
4415 64 bytes. This is probably not correct, but we can work that out later;
4416 the main objective now is to get FXSAVE to work like real hardware and
4417 make bs3-cpu-basic2 work. */
4418 if (cbAccess <= 64)
4419 { /* likely */ }
4420 else
4421 GCPtrWhere += cbAccess - 1;
4422
4423 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4424 uErr, GCPtrWhere);
4425}
4426
4427#ifdef IEM_WITH_SETJMP
4428/** \#PF(n) - 0e, longjmp. */
4429DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4430 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4431{
4432 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4433}
4434#endif
4435
4436
4437/** \#MF(0) - 10. */
4438VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4439{
4440 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4441 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4442
4443 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4444 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4445 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4446}
4447
4448
4449/** \#AC(0) - 11. */
4450VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4451{
4452 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4453}
4454
4455#ifdef IEM_WITH_SETJMP
4456/** \#AC(0) - 11, longjmp. */
4457DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4458{
4459 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4460}
4461#endif
4462
4463
4464/** \#XF(0)/\#XM(0) - 19. */
4465VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4466{
4467 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4468}
4469
4470
4471/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4472IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4473{
4474 NOREF(cbInstr);
4475 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4476}
4477
4478
4479/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4480IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4481{
4482 NOREF(cbInstr);
4483 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4484}
4485
4486
4487/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4488IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4489{
4490 NOREF(cbInstr);
4491 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4492}
4493
4494
4495/** @} */
4496
4497/** @name Common opcode decoders.
4498 * @{
4499 */
4500//#include <iprt/mem.h>
4501
4502/**
4503 * Used to add extra details about a stub case.
4504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4505 */
4506void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4507{
4508#if defined(LOG_ENABLED) && defined(IN_RING3)
4509 PVM pVM = pVCpu->CTX_SUFF(pVM);
4510 char szRegs[4096];
4511 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4512 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4513 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4514 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4515 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4516 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4517 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4518 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4519 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4520 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4521 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4522 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4523 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4524 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4525 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4526 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4527 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4528 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4529 " efer=%016VR{efer}\n"
4530 " pat=%016VR{pat}\n"
4531 " sf_mask=%016VR{sf_mask}\n"
4532 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4533 " lstar=%016VR{lstar}\n"
4534 " star=%016VR{star} cstar=%016VR{cstar}\n"
4535 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4536 );
4537
4538 char szInstr[256];
4539 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4540 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4541 szInstr, sizeof(szInstr), NULL);
4542
4543 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4544#else
4545 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4546#endif
4547}
4548
4549/** @} */
4550
4551
4552
4553/** @name Register Access.
4554 * @{
4555 */
4556
4557/**
4558 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4559 *
4560 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4561 * segment limit.
4562 *
4563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4564 * @param cbInstr Instruction size.
4565 * @param offNextInstr The offset of the next instruction.
4566 * @param enmEffOpSize Effective operand size.
4567 */
4568VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4569 IEMMODE enmEffOpSize) RT_NOEXCEPT
4570{
4571 switch (enmEffOpSize)
4572 {
4573 case IEMMODE_16BIT:
4574 {
4575 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
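            /* Note: uNewIp is a 16-bit value, so the addition wraps at 64K just
               like IP does for near jumps taken with a 16-bit operand size. */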
4576 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4577 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4578 pVCpu->cpum.GstCtx.rip = uNewIp;
4579 else
4580 return iemRaiseGeneralProtectionFault0(pVCpu);
4581 break;
4582 }
4583
4584 case IEMMODE_32BIT:
4585 {
4586 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4587 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4588
4589 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4590 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4591 pVCpu->cpum.GstCtx.rip = uNewEip;
4592 else
4593 return iemRaiseGeneralProtectionFault0(pVCpu);
4594 break;
4595 }
4596
4597 case IEMMODE_64BIT:
4598 {
4599 Assert(IEM_IS_64BIT_CODE(pVCpu));
4600
4601 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4602 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4603 pVCpu->cpum.GstCtx.rip = uNewRip;
4604 else
4605 return iemRaiseGeneralProtectionFault0(pVCpu);
4606 break;
4607 }
4608
4609 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4610 }
4611
4612#ifndef IEM_WITH_CODE_TLB
4613 /* Flush the prefetch buffer. */
4614 pVCpu->iem.s.cbOpcode = cbInstr;
4615#endif
4616
4617 /*
4618 * Clear RF and finish the instruction (maybe raise #DB).
4619 */
4620 return iemRegFinishClearingRF(pVCpu);
4621}
4622
4623
4624/**
4625 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4626 *
4627 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4628 * segment limit.
4629 *
4630 * @returns Strict VBox status code.
4631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4632 * @param cbInstr Instruction size.
4633 * @param offNextInstr The offset of the next instruction.
4634 */
4635VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4636{
4637 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4638
4639 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4640 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4641 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4642 pVCpu->cpum.GstCtx.rip = uNewIp;
4643 else
4644 return iemRaiseGeneralProtectionFault0(pVCpu);
4645
4646#ifndef IEM_WITH_CODE_TLB
4647 /* Flush the prefetch buffer. */
4648 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4649#endif
4650
4651 /*
4652 * Clear RF and finish the instruction (maybe raise #DB).
4653 */
4654 return iemRegFinishClearingRF(pVCpu);
4655}
4656
4657
4658/**
4659 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4660 *
4661 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4662 * segment limit.
4663 *
4664 * @returns Strict VBox status code.
4665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4666 * @param cbInstr Instruction size.
4667 * @param offNextInstr The offset of the next instruction.
4668 * @param enmEffOpSize Effective operand size.
4669 */
4670VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4671 IEMMODE enmEffOpSize) RT_NOEXCEPT
4672{
4673 if (enmEffOpSize == IEMMODE_32BIT)
4674 {
4675 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4676
4677 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4678 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4679 pVCpu->cpum.GstCtx.rip = uNewEip;
4680 else
4681 return iemRaiseGeneralProtectionFault0(pVCpu);
4682 }
4683 else
4684 {
4685 Assert(enmEffOpSize == IEMMODE_64BIT);
4686
4687 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4688 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4689 pVCpu->cpum.GstCtx.rip = uNewRip;
4690 else
4691 return iemRaiseGeneralProtectionFault0(pVCpu);
4692 }
4693
4694#ifndef IEM_WITH_CODE_TLB
4695 /* Flush the prefetch buffer. */
4696 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4697#endif
4698
4699 /*
4700 * Clear RF and finish the instruction (maybe raise #DB).
4701 */
4702 return iemRegFinishClearingRF(pVCpu);
4703}
4704
4705/** @} */
4706
4707
4708/** @name FPU access and helpers.
4709 *
4710 * @{
4711 */
4712
4713/**
4714 * Updates the x87.DS and FPUDP registers.
4715 *
4716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4717 * @param pFpuCtx The FPU context.
4718 * @param iEffSeg The effective segment register.
4719 * @param GCPtrEff The effective address relative to @a iEffSeg.
4720 */
4721DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4722{
4723 RTSEL sel;
4724 switch (iEffSeg)
4725 {
4726 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4727 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4728 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4729 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4730 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4731 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4732 default:
4733 AssertMsgFailed(("%d\n", iEffSeg));
4734 sel = pVCpu->cpum.GstCtx.ds.Sel;
4735 }
4736 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4737 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4738 {
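        /* In real and V86 mode the data pointer is recorded as a linear address
           (selector * 16 + offset) and the DS field is left zero. */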
4739 pFpuCtx->DS = 0;
4740 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4741 }
4742 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4743 {
4744 pFpuCtx->DS = sel;
4745 pFpuCtx->FPUDP = GCPtrEff;
4746 }
4747 else
4748 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4749}
4750
4751
4752/**
4753 * Rotates the stack registers in the push direction.
4754 *
4755 * @param pFpuCtx The FPU context.
4756 * @remarks This is a complete waste of time, but fxsave stores the registers in
4757 * stack order.
4758 */
4759DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4760{
4761 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4762 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4763 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4764 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4765 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4766 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4767 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4768 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4769 pFpuCtx->aRegs[0].r80 = r80Tmp;
4770}
4771
4772
4773/**
4774 * Rotates the stack registers in the pop direction.
4775 *
4776 * @param pFpuCtx The FPU context.
4777 * @remarks This is a complete waste of time, but fxsave stores the registers in
4778 * stack order.
4779 */
4780DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4781{
4782 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4783 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4784 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4785 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4786 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4787 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4788 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4789 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4790 pFpuCtx->aRegs[7].r80 = r80Tmp;
4791}
4792
4793
4794/**
4795 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4796 * exception prevents it.
4797 *
4798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4799 * @param pResult The FPU operation result to push.
4800 * @param pFpuCtx The FPU context.
4801 */
4802static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4803{
4804 /* Update FSW and bail if there are pending exceptions afterwards. */
4805 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4806 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
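    /* The FCW mask bits (IM/ZM/DM) occupy the same bit positions as the
       corresponding FSW status flags (IE/ZE/DE), so the masks can be applied
       directly to the status flags to find unmasked exceptions. */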
4807 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4808 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4809 {
4810 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4811 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4812 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4813 pFpuCtx->FSW = fFsw;
4814 return;
4815 }
4816
4817 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
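    /* TOP is a 3-bit field; adding 7 modulo 8 decrements it by one, yielding
       the register slot the pushed value will occupy. */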
4818 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4819 {
4820 /* All is fine, push the actual value. */
4821 pFpuCtx->FTW |= RT_BIT(iNewTop);
4822 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4823 }
4824 else if (pFpuCtx->FCW & X86_FCW_IM)
4825 {
4826 /* Masked stack overflow, push QNaN. */
4827 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4828 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4829 }
4830 else
4831 {
4832 /* Raise stack overflow, don't push anything. */
4833 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4834 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4835 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4836 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4837 return;
4838 }
4839
4840 fFsw &= ~X86_FSW_TOP_MASK;
4841 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4842 pFpuCtx->FSW = fFsw;
4843
4844 iemFpuRotateStackPush(pFpuCtx);
4845 RT_NOREF(pVCpu);
4846}
4847
4848
4849/**
4850 * Stores a result in a FPU register and updates the FSW and FTW.
4851 *
4852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4853 * @param pFpuCtx The FPU context.
4854 * @param pResult The result to store.
4855 * @param iStReg Which FPU register to store it in.
4856 */
4857static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4858{
4859 Assert(iStReg < 8);
4860 uint16_t fNewFsw = pFpuCtx->FSW;
4861 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4862 fNewFsw &= ~X86_FSW_C_MASK;
4863 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4864 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4865 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4866 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4867 pFpuCtx->FSW = fNewFsw;
4868 pFpuCtx->FTW |= RT_BIT(iReg);
4869 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4870 RT_NOREF(pVCpu);
4871}
4872
4873
4874/**
4875 * Only updates the FPU status word (FSW) with the result of the current
4876 * instruction.
4877 *
4878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4879 * @param pFpuCtx The FPU context.
4880 * @param u16FSW The FSW output of the current instruction.
4881 */
4882static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4883{
4884 uint16_t fNewFsw = pFpuCtx->FSW;
4885 fNewFsw &= ~X86_FSW_C_MASK;
4886 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4887 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4888 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4889 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4890 pFpuCtx->FSW = fNewFsw;
4891 RT_NOREF(pVCpu);
4892}
4893
4894
4895/**
4896 * Pops one item off the FPU stack if no pending exception prevents it.
4897 *
4898 * @param pFpuCtx The FPU context.
4899 */
4900static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4901{
4902 /* Check pending exceptions. */
4903 uint16_t uFSW = pFpuCtx->FSW;
4904 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4905 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4906 return;
4907
4908 /* TOP--. */
4909 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4910 uFSW &= ~X86_FSW_TOP_MASK;
4911 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
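    /* Adding 9 within the 3-bit TOP field is congruent to adding 1 modulo 8,
       i.e. this increments TOP by one, which is what a pop does. */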
4912 pFpuCtx->FSW = uFSW;
4913
4914 /* Mark the previous ST0 as empty. */
4915 iOldTop >>= X86_FSW_TOP_SHIFT;
4916 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4917
4918 /* Rotate the registers. */
4919 iemFpuRotateStackPop(pFpuCtx);
4920}
4921
4922
4923/**
4924 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4925 *
4926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4927 * @param pResult The FPU operation result to push.
4928 * @param uFpuOpcode The FPU opcode value.
4929 */
4930void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4931{
4932 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4933 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4934 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4935}
4936
4937
4938/**
4939 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4940 * and sets FPUDP and FPUDS.
4941 *
4942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4943 * @param pResult The FPU operation result to push.
4944 * @param iEffSeg The effective segment register.
4945 * @param GCPtrEff The effective address relative to @a iEffSeg.
4946 * @param uFpuOpcode The FPU opcode value.
4947 */
4948void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4949 uint16_t uFpuOpcode) RT_NOEXCEPT
4950{
4951 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4952 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4953 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4954 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4955}
4956
4957
4958/**
4959 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4960 * unless a pending exception prevents it.
4961 *
4962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4963 * @param pResult The FPU operation result to store and push.
4964 * @param uFpuOpcode The FPU opcode value.
4965 */
4966void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4967{
4968 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4969 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4970
4971 /* Update FSW and bail if there are pending exceptions afterwards. */
4972 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4973 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4974 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4975 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4976 {
4977 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4978 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4979 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4980 pFpuCtx->FSW = fFsw;
4981 return;
4982 }
4983
4984 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4985 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4986 {
4987 /* All is fine, push the actual value. */
4988 pFpuCtx->FTW |= RT_BIT(iNewTop);
4989 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4990 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4991 }
4992 else if (pFpuCtx->FCW & X86_FCW_IM)
4993 {
4994 /* Masked stack overflow, push QNaN. */
4995 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4996 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4997 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4998 }
4999 else
5000 {
5001 /* Raise stack overflow, don't push anything. */
5002 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5003 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5004 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5005 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5006 return;
5007 }
5008
5009 fFsw &= ~X86_FSW_TOP_MASK;
5010 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5011 pFpuCtx->FSW = fFsw;
5012
5013 iemFpuRotateStackPush(pFpuCtx);
5014}
5015
5016
5017/**
5018 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5019 * FOP.
5020 *
5021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5022 * @param pResult The result to store.
5023 * @param iStReg Which FPU register to store it in.
5024 * @param uFpuOpcode The FPU opcode value.
5025 */
5026void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5027{
5028 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5029 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5030 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5031}
5032
5033
5034/**
5035 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5036 * FOP, and then pops the stack.
5037 *
5038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5039 * @param pResult The result to store.
5040 * @param iStReg Which FPU register to store it in.
5041 * @param uFpuOpcode The FPU opcode value.
5042 */
5043void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5044{
5045 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5046 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5047 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5048 iemFpuMaybePopOne(pFpuCtx);
5049}
5050
5051
5052/**
5053 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5054 * FPUDP, and FPUDS.
5055 *
5056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5057 * @param pResult The result to store.
5058 * @param iStReg Which FPU register to store it in.
5059 * @param iEffSeg The effective memory operand selector register.
5060 * @param GCPtrEff The effective memory operand offset.
5061 * @param uFpuOpcode The FPU opcode value.
5062 */
5063void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5064 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5065{
5066 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5067 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5068 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5069 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5070}
5071
5072
5073/**
5074 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5075 * FPUDP, and FPUDS, and then pops the stack.
5076 *
5077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5078 * @param pResult The result to store.
5079 * @param iStReg Which FPU register to store it in.
5080 * @param iEffSeg The effective memory operand selector register.
5081 * @param GCPtrEff The effective memory operand offset.
5082 * @param uFpuOpcode The FPU opcode value.
5083 */
5084void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5085 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5086{
5087 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5088 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5089 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5090 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5091 iemFpuMaybePopOne(pFpuCtx);
5092}
5093
5094
5095/**
5096 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5097 *
5098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5099 * @param uFpuOpcode The FPU opcode value.
5100 */
5101void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5102{
5103 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5104 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5105}
5106
5107
5108/**
5109 * Updates the FSW, FOP, FPUIP, and FPUCS.
5110 *
5111 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5112 * @param u16FSW The FSW from the current instruction.
5113 * @param uFpuOpcode The FPU opcode value.
5114 */
5115void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5116{
5117 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5118 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5119 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5120}
5121
5122
5123/**
5124 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5125 *
5126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5127 * @param u16FSW The FSW from the current instruction.
5128 * @param uFpuOpcode The FPU opcode value.
5129 */
5130void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5131{
5132 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5133 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5134 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5135 iemFpuMaybePopOne(pFpuCtx);
5136}
5137
5138
5139/**
5140 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5141 *
5142 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5143 * @param u16FSW The FSW from the current instruction.
5144 * @param iEffSeg The effective memory operand selector register.
5145 * @param GCPtrEff The effective memory operand offset.
5146 * @param uFpuOpcode The FPU opcode value.
5147 */
5148void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5149{
5150 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5151 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5152 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5153 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5154}
5155
5156
5157/**
5158 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5159 *
5160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5161 * @param u16FSW The FSW from the current instruction.
5162 * @param uFpuOpcode The FPU opcode value.
5163 */
5164void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5165{
5166 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5167 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5168 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5169 iemFpuMaybePopOne(pFpuCtx);
5170 iemFpuMaybePopOne(pFpuCtx);
5171}
5172
5173
5174/**
5175 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5176 *
5177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5178 * @param u16FSW The FSW from the current instruction.
5179 * @param iEffSeg The effective memory operand selector register.
5180 * @param GCPtrEff The effective memory operand offset.
5181 * @param uFpuOpcode The FPU opcode value.
5182 */
5183void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5184 uint16_t uFpuOpcode) RT_NOEXCEPT
5185{
5186 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5187 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5188 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5189 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5190 iemFpuMaybePopOne(pFpuCtx);
5191}
5192
5193
5194/**
5195 * Worker routine for raising an FPU stack underflow exception.
5196 *
5197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5198 * @param pFpuCtx The FPU context.
5199 * @param iStReg The stack register being accessed.
5200 */
5201static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5202{
5203 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5204 if (pFpuCtx->FCW & X86_FCW_IM)
5205 {
5206 /* Masked underflow. */
5207 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5208 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5209 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5210 if (iStReg != UINT8_MAX)
5211 {
5212 pFpuCtx->FTW |= RT_BIT(iReg);
5213 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5214 }
5215 }
5216 else
5217 {
5218 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5219 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5220 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5221 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5222 }
5223 RT_NOREF(pVCpu);
5224}
5225
5226
5227/**
5228 * Raises a FPU stack underflow exception.
5229 *
5230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5231 * @param iStReg The destination register that should be loaded
5232 * with QNaN if \#IS is not masked. Specify
5233 * UINT8_MAX if none (like for fcom).
5234 * @param uFpuOpcode The FPU opcode value.
5235 */
5236void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5237{
5238 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5239 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5240 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5241}
5242
5243
5244void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5245{
5246 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5247 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5248 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5249 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5250}
5251
5252
5253void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5254{
5255 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5256 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5257 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5258 iemFpuMaybePopOne(pFpuCtx);
5259}
5260
5261
5262void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5263 uint16_t uFpuOpcode) RT_NOEXCEPT
5264{
5265 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5266 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5267 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5268 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5269 iemFpuMaybePopOne(pFpuCtx);
5270}
5271
5272
5273void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5274{
5275 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5276 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5277 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5278 iemFpuMaybePopOne(pFpuCtx);
5279 iemFpuMaybePopOne(pFpuCtx);
5280}
5281
5282
5283void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5284{
5285 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5286 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5287
5288 if (pFpuCtx->FCW & X86_FCW_IM)
5289 {
5290 /* Masked underflow - Push QNaN. */
5291 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5292 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5293 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5294 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5295 pFpuCtx->FTW |= RT_BIT(iNewTop);
5296 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5297 iemFpuRotateStackPush(pFpuCtx);
5298 }
5299 else
5300 {
5301 /* Exception pending - don't change TOP or the register stack. */
5302 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5303 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5304 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5305 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5306 }
5307}
5308
5309
5310void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5311{
5312 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5313 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5314
5315 if (pFpuCtx->FCW & X86_FCW_IM)
5316 {
5317 /* Masked underflow - Push QNaN. */
5318 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5319 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5320 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5321 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5322 pFpuCtx->FTW |= RT_BIT(iNewTop);
5323 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5324 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5325 iemFpuRotateStackPush(pFpuCtx);
5326 }
5327 else
5328 {
5329 /* Exception pending - don't change TOP or the register stack. */
5330 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5331 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5332 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5333 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5334 }
5335}
5336
5337
5338/**
5339 * Worker routine for raising an FPU stack overflow exception on a push.
5340 *
5341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5342 * @param pFpuCtx The FPU context.
5343 */
5344static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5345{
5346 if (pFpuCtx->FCW & X86_FCW_IM)
5347 {
5348 /* Masked overflow. */
5349 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5350 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5351 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5352 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5353 pFpuCtx->FTW |= RT_BIT(iNewTop);
5354 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5355 iemFpuRotateStackPush(pFpuCtx);
5356 }
5357 else
5358 {
5359 /* Exception pending - don't change TOP or the register stack. */
5360 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5361 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5362 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5363 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5364 }
5365 RT_NOREF(pVCpu);
5366}
5367
5368
5369/**
5370 * Raises a FPU stack overflow exception on a push.
5371 *
5372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5373 * @param uFpuOpcode The FPU opcode value.
5374 */
5375void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5376{
5377 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5378 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5379 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5380}
5381
5382
5383/**
5384 * Raises a FPU stack overflow exception on a push with a memory operand.
5385 *
5386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5387 * @param iEffSeg The effective memory operand selector register.
5388 * @param GCPtrEff The effective memory operand offset.
5389 * @param uFpuOpcode The FPU opcode value.
5390 */
5391void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5392{
5393 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5394 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5395 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5396 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5397}
5398
5399/** @} */
5400
5401
5402/** @name SSE+AVX SIMD access and helpers.
5403 *
5404 * @{
5405 */
5406/**
5407 * Stores a result in a SIMD XMM register, updates the MXCSR.
5408 *
5409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5410 * @param pResult The result to store.
5411 * @param iXmmReg Which SIMD XMM register to store the result in.
5412 */
5413void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5414{
5415 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5416 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5417
5418 /* The result is only updated if there is no unmasked exception pending. */
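    /* The MXCSR exception mask bits sit X86_MXCSR_XCPT_MASK_SHIFT bits above the
       corresponding status flags, so shifting them down and inverting yields a
       bitmap of unmasked exceptions to test against the accumulated flags. */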
5419 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5420 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5421 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5422}
5423
5424
5425/**
5426 * Updates the MXCSR.
5427 *
5428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5429 * @param fMxcsr The new MXCSR value.
5430 */
5431void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5432{
5433 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5434 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5435}
5436/** @} */
5437
5438
5439/** @name Memory access.
5440 *
5441 * @{
5442 */
5443
5444#undef LOG_GROUP
5445#define LOG_GROUP LOG_GROUP_IEM_MEM
5446
5447/**
5448 * Updates the IEMCPU::cbWritten counter if applicable.
5449 *
5450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5451 * @param fAccess The access being accounted for.
5452 * @param cbMem The access size.
5453 */
5454DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5455{
5456 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5457 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5458 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5459}
5460
5461
5462/**
5463 * Applies the segment limit, base and attributes.
5464 *
5465 * This may raise a \#GP or \#SS.
5466 *
5467 * @returns VBox strict status code.
5468 *
5469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5470 * @param fAccess The kind of access which is being performed.
5471 * @param iSegReg The index of the segment register to apply.
5472 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5473 * TSS, ++).
5474 * @param cbMem The access size.
5475 * @param pGCPtrMem Pointer to the guest memory address to apply
5476 * segmentation to. Input and output parameter.
5477 */
5478VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5479{
5480 if (iSegReg == UINT8_MAX)
5481 return VINF_SUCCESS;
5482
5483 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5484 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5485 switch (IEM_GET_CPU_MODE(pVCpu))
5486 {
5487 case IEMMODE_16BIT:
5488 case IEMMODE_32BIT:
5489 {
5490 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5491 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5492
5493 if ( pSel->Attr.n.u1Present
5494 && !pSel->Attr.n.u1Unusable)
5495 {
5496 Assert(pSel->Attr.n.u1DescType);
5497 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5498 {
5499 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5500 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5501 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5502
5503 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5504 {
5505 /** @todo CPL check. */
5506 }
5507
5508 /*
5509 * There are two kinds of data selectors, normal and expand down.
5510 */
5511 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5512 {
5513 if ( GCPtrFirst32 > pSel->u32Limit
5514 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5515 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5516 }
5517 else
5518 {
5519 /*
5520 * The upper boundary is defined by the B bit, not the G bit!
5521 */
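                        /* For expand-down data segments the valid offset range
                           is (limit, 0xffff] or (limit, 0xffffffff] depending
                           on the D/B bit, i.e. everything above the limit. */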
5522 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5523 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5524 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5525 }
5526 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5527 }
5528 else
5529 {
5530 /*
5531 * Code selectors can usually be used to read through; writing is
5532 * only permitted in real and V8086 mode.
5533 */
5534 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5535 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5536 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5537 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5538 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5539
5540 if ( GCPtrFirst32 > pSel->u32Limit
5541 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5542 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5543
5544 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5545 {
5546 /** @todo CPL check. */
5547 }
5548
5549 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5550 }
5551 }
5552 else
5553 return iemRaiseGeneralProtectionFault0(pVCpu);
5554 return VINF_SUCCESS;
5555 }
5556
5557 case IEMMODE_64BIT:
5558 {
5559 RTGCPTR GCPtrMem = *pGCPtrMem;
5560 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5561 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5562
5563 Assert(cbMem >= 1);
5564 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5565 return VINF_SUCCESS;
5566 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5567 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5568 return iemRaiseGeneralProtectionFault0(pVCpu);
5569 }
5570
5571 default:
5572 AssertFailedReturn(VERR_IEM_IPE_7);
5573 }
5574}
5575
5576
5577/**
5578 * Translates a virtual address to a physical address and checks if we
5579 * can access the page as specified.
5580 *
5581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5582 * @param GCPtrMem The virtual address.
5583 * @param cbAccess The access size, for raising \#PF correctly for
5584 * FXSAVE and such.
5585 * @param fAccess The intended access.
5586 * @param pGCPhysMem Where to return the physical address.
5587 */
5588VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5589 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5590{
5591 /** @todo Need a different PGM interface here. We're currently using
5592 * generic / REM interfaces. This won't cut it for R0. */
5593 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5594 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5595 * here. */
5596 PGMPTWALK Walk;
5597 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5598 if (RT_FAILURE(rc))
5599 {
5600 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5601 /** @todo Check unassigned memory in unpaged mode. */
5602 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5603#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5604 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5605 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5606#endif
5607 *pGCPhysMem = NIL_RTGCPHYS;
5608 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5609 }
5610
5611 /* If the page is writable and does not have the no-exec bit set, all
5612 access is allowed. Otherwise we'll have to check more carefully... */
5613 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5614 {
5615 /* Write to read only memory? */
5616 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5617 && !(Walk.fEffective & X86_PTE_RW)
5618 && ( ( IEM_GET_CPL(pVCpu) == 3
5619 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5620 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5621 {
5622 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5623 *pGCPhysMem = NIL_RTGCPHYS;
5624#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5625 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5626 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5627#endif
5628 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5629 }
5630
5631 /* Kernel memory accessed by userland? */
5632 if ( !(Walk.fEffective & X86_PTE_US)
5633 && IEM_GET_CPL(pVCpu) == 3
5634 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5635 {
5636 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5637 *pGCPhysMem = NIL_RTGCPHYS;
5638#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5639 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5640 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5641#endif
5642 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5643 }
5644
5645 /* Executing non-executable memory? */
5646 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5647 && (Walk.fEffective & X86_PTE_PAE_NX)
5648 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5649 {
5650 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5651 *pGCPhysMem = NIL_RTGCPHYS;
5652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5653 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5654 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5655#endif
5656 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5657 VERR_ACCESS_DENIED);
5658 }
5659 }
5660
5661 /*
5662 * Set the dirty / access flags.
5663 * ASSUMES this is set when the address is translated rather than on commit...
5664 */
5665 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5666 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
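    /* A write needs both the accessed and dirty bits; a read or fetch only needs
       the accessed bit. Real CPUs set them during the table walk, which is what
       the ASSUMES comment above refers to. */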
5667 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5668 {
5669 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5670 AssertRC(rc2);
5671 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5672 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5673 }
5674
5675 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5676 *pGCPhysMem = GCPhys;
5677 return VINF_SUCCESS;
5678}
5679
5680
5681/**
5682 * Looks up a memory mapping entry.
5683 *
5684 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5686 * @param pvMem The memory address.
5687 * @param fAccess The access type and purpose to match.
5688 */
5689DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5690{
5691 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5692 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5693 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5694 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5695 return 0;
5696 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5697 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5698 return 1;
5699 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5700 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5701 return 2;
5702 return VERR_NOT_FOUND;
5703}
5704
5705
5706/**
5707 * Finds a free memmap entry when using iNextMapping doesn't work.
5708 *
5709 * @returns Memory mapping index, 1024 on failure.
5710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5711 */
5712static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5713{
5714 /*
5715 * The easy case.
5716 */
5717 if (pVCpu->iem.s.cActiveMappings == 0)
5718 {
5719 pVCpu->iem.s.iNextMapping = 1;
5720 return 0;
5721 }
5722
5723 /* There should be enough mappings for all instructions. */
5724 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5725
5726 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5727 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5728 return i;
5729
5730 AssertFailedReturn(1024);
5731}
5732
5733
5734/**
5735 * Commits a bounce buffer that needs writing back and unmaps it.
5736 *
5737 * @returns Strict VBox status code.
5738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5739 * @param iMemMap The index of the buffer to commit.
5740 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5741 * Always false in ring-3, obviously.
5742 */
5743static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5744{
5745 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5746 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5747#ifdef IN_RING3
5748 Assert(!fPostponeFail);
5749 RT_NOREF_PV(fPostponeFail);
5750#endif
5751
5752 /*
5753 * Do the writing.
5754 */
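    /* The bounce buffer may span two physical pages (cbFirst + cbSecond bytes).
       Each part is written separately; PGMPhysWrite is used so that access
       handlers are honoured unless the bypass-handlers mode is active. */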
5755 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5756 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5757 {
5758 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5759 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5760 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5761 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5762 {
5763 /*
5764 * Carefully and efficiently dealing with access handler return
5765 * codes makes this a little bloated.
5766 */
5767 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5768 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5769 pbBuf,
5770 cbFirst,
5771 PGMACCESSORIGIN_IEM);
5772 if (rcStrict == VINF_SUCCESS)
5773 {
5774 if (cbSecond)
5775 {
5776 rcStrict = PGMPhysWrite(pVM,
5777 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5778 pbBuf + cbFirst,
5779 cbSecond,
5780 PGMACCESSORIGIN_IEM);
5781 if (rcStrict == VINF_SUCCESS)
5782 { /* nothing */ }
5783 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5784 {
5785 LogEx(LOG_GROUP_IEM,
5786 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5787 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5788 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5789 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5790 }
5791#ifndef IN_RING3
5792 else if (fPostponeFail)
5793 {
5794 LogEx(LOG_GROUP_IEM,
5795 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5796 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5797 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5798 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5799 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5800 return iemSetPassUpStatus(pVCpu, rcStrict);
5801 }
5802#endif
5803 else
5804 {
5805 LogEx(LOG_GROUP_IEM,
5806 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5807 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5808 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5809 return rcStrict;
5810 }
5811 }
5812 }
5813 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5814 {
5815 if (!cbSecond)
5816 {
5817 LogEx(LOG_GROUP_IEM,
5818 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5819 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5820 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5821 }
5822 else
5823 {
5824 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5825 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5826 pbBuf + cbFirst,
5827 cbSecond,
5828 PGMACCESSORIGIN_IEM);
5829 if (rcStrict2 == VINF_SUCCESS)
5830 {
5831 LogEx(LOG_GROUP_IEM,
5832 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5833 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5834 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5835 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5836 }
5837 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5838 {
5839 LogEx(LOG_GROUP_IEM,
5840 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5841 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5842 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5843 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5844 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5845 }
5846#ifndef IN_RING3
5847 else if (fPostponeFail)
5848 {
5849 LogEx(LOG_GROUP_IEM,
5850 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5851 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5852 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5853 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5854 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5855 return iemSetPassUpStatus(pVCpu, rcStrict);
5856 }
5857#endif
5858 else
5859 {
5860 LogEx(LOG_GROUP_IEM,
5861 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5862 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5864 return rcStrict2;
5865 }
5866 }
5867 }
5868#ifndef IN_RING3
5869 else if (fPostponeFail)
5870 {
5871 LogEx(LOG_GROUP_IEM,
5872 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5874 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5875 if (!cbSecond)
5876 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5877 else
5878 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5879 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5880 return iemSetPassUpStatus(pVCpu, rcStrict);
5881 }
5882#endif
5883 else
5884 {
5885 LogEx(LOG_GROUP_IEM,
5886 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5887 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5888 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5889 return rcStrict;
5890 }
5891 }
5892 else
5893 {
5894 /*
5895 * No access handlers, much simpler.
5896 */
5897 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5898 if (RT_SUCCESS(rc))
5899 {
5900 if (cbSecond)
5901 {
5902 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5903 if (RT_SUCCESS(rc))
5904 { /* likely */ }
5905 else
5906 {
5907 LogEx(LOG_GROUP_IEM,
5908 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5909 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5910 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5911 return rc;
5912 }
5913 }
5914 }
5915 else
5916 {
5917 LogEx(LOG_GROUP_IEM,
5918 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5919 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5920 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5921 return rc;
5922 }
5923 }
5924 }
5925
5926#if defined(IEM_LOG_MEMORY_WRITES)
5927 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5928 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5929 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5930 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5931 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5932 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5933
5934 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5935 g_cbIemWrote = cbWrote;
5936 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5937#endif
5938
5939 /*
5940 * Free the mapping entry.
5941 */
5942 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5943 Assert(pVCpu->iem.s.cActiveMappings != 0);
5944 pVCpu->iem.s.cActiveMappings--;
5945 return VINF_SUCCESS;
5946}
5947
5948
5949/**
5950 * iemMemMap worker that deals with a request crossing pages.
5951 */
5952static VBOXSTRICTRC
5953iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
5954 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5955{
5956 Assert(cbMem <= GUEST_PAGE_SIZE);
5957
5958 /*
5959 * Do the address translations.
5960 */
5961 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5962 RTGCPHYS GCPhysFirst;
5963 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5964 if (rcStrict != VINF_SUCCESS)
5965 return rcStrict;
5966 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5967
5968 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5969 RTGCPHYS GCPhysSecond;
5970 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5971 cbSecondPage, fAccess, &GCPhysSecond);
5972 if (rcStrict != VINF_SUCCESS)
5973 return rcStrict;
5974 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5975 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5976
5977 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5978
5979 /*
5980 * Read in the current memory content if it's a read, execute or partial
5981 * write access.
5982 */
5983 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5984
5985 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5986 {
5987 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5988 {
5989 /*
5990 * Must carefully deal with access handler status codes here,
5991 * makes the code a bit bloated.
5992 */
5993 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5994 if (rcStrict == VINF_SUCCESS)
5995 {
5996 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5997 if (rcStrict == VINF_SUCCESS)
5998 { /*likely */ }
5999 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6000 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6001 else
6002 {
6003 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6004 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6005 return rcStrict;
6006 }
6007 }
6008 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6009 {
6010 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6011 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6012 {
6013 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6014 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6015 }
6016 else
6017 {
6018 LogEx(LOG_GROUP_IEM,
6019 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6020 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6021 return rcStrict2;
6022 }
6023 }
6024 else
6025 {
6026 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6027 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6028 return rcStrict;
6029 }
6030 }
6031 else
6032 {
6033 /*
6034 * No informational status codes here, much more straightforward.
6035 */
6036 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6037 if (RT_SUCCESS(rc))
6038 {
6039 Assert(rc == VINF_SUCCESS);
6040 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6041 if (RT_SUCCESS(rc))
6042 Assert(rc == VINF_SUCCESS);
6043 else
6044 {
6045 LogEx(LOG_GROUP_IEM,
6046 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6047 return rc;
6048 }
6049 }
6050 else
6051 {
6052 LogEx(LOG_GROUP_IEM,
6053 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6054 return rc;
6055 }
6056 }
6057 }
6058#ifdef VBOX_STRICT
6059 else
6060 memset(pbBuf, 0xcc, cbMem);
6061 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6062 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6063#endif
6064 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6065
6066 /*
6067 * Commit the bounce buffer entry.
6068 */
6069 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6070 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6071 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6072 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6073 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6074 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6075 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6076 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6077 pVCpu->iem.s.cActiveMappings++;
6078
6079 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6080 *ppvMem = pbBuf;
6081 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6082 return VINF_SUCCESS;
6083}
6084
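/*
 * A minimal worked example of the cross-page split done above, assuming
 * 4 KiB guest pages (GUEST_PAGE_SIZE = 0x1000, GUEST_PAGE_OFFSET_MASK = 0xfff);
 * the concrete address and size are illustrative only:
 *
 *      RTGCPTR  const GCPtrFirst   = 0x00007ffd;                                // 3 bytes left on the first page
 *      size_t   const cbMem        = 8;                                         // an 8 byte access
 *      uint32_t const cbFirstPage  = 0x1000 - (uint32_t)(GCPtrFirst & 0xfff);   // = 3
 *      uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;             // = 5
 *
 * The first 3 bytes come from the tail of the first page and the remaining 5
 * from the start of the second page; both halves end up back to back in
 * aBounceBuffers[iMemMap].ab[], which is the buffer handed back to the caller.
 */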
6085
6086/**
6087 * iemMemMap worker that deals with iemMemPageMap failures.
6088 */
6089static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6090 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6091{
6092 /*
6093 * Filter out conditions we can handle and the ones which shouldn't happen.
6094 */
6095 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6096 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6097 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6098 {
6099 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6100 return rcMap;
6101 }
6102 pVCpu->iem.s.cPotentialExits++;
6103
6104 /*
6105 * Read in the current memory content if it's a read, execute or partial
6106 * write access.
6107 */
6108 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6109 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6110 {
6111 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6112 memset(pbBuf, 0xff, cbMem);
6113 else
6114 {
6115 int rc;
6116 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6117 {
6118 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6119 if (rcStrict == VINF_SUCCESS)
6120 { /* nothing */ }
6121 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6122 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6123 else
6124 {
6125 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6126 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6127 return rcStrict;
6128 }
6129 }
6130 else
6131 {
6132 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6133 if (RT_SUCCESS(rc))
6134 { /* likely */ }
6135 else
6136 {
6137 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6138 GCPhysFirst, rc));
6139 return rc;
6140 }
6141 }
6142 }
6143 }
6144#ifdef VBOX_STRICT
6145 else
6146 memset(pbBuf, 0xcc, cbMem);
6147#endif
6148#ifdef VBOX_STRICT
6149 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6150 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6151#endif
6152
6153 /*
6154 * Commit the bounce buffer entry.
6155 */
6156 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6157 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6158 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6160 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6161 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6162 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6163 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6164 pVCpu->iem.s.cActiveMappings++;
6165
6166 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6167 *ppvMem = pbBuf;
6168 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6169 return VINF_SUCCESS;
6170}
6171
6172
6173
6174/**
6175 * Maps the specified guest memory for the given kind of access.
6176 *
6177 * This may be using bounce buffering of the memory if it's crossing a page
6178 * boundary or if there is an access handler installed for any of it. Because
6179 * of lock prefix guarantees, we're in for some extra clutter when this
6180 * happens.
6181 *
6182 * This may raise a \#GP, \#SS, \#PF or \#AC.
6183 *
6184 * @returns VBox strict status code.
6185 *
6186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6187 * @param ppvMem Where to return the pointer to the mapped memory.
6188 * @param pbUnmapInfo Where to return unmap info to be passed to
6189 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6190 * done.
6191 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6192 * 8, 12, 16, 32 or 512. When used by string operations
6193 * it can be up to a page.
6194 * @param iSegReg The index of the segment register to use for this
6195 * access. The base and limits are checked. Use UINT8_MAX
6196 * to indicate that no segmentation is required (for IDT,
6197 * GDT and LDT accesses).
6198 * @param GCPtrMem The address of the guest memory.
6199 * @param fAccess How the memory is being accessed. The
6200 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6201 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6202 * when raising exceptions.
6203 * @param uAlignCtl Alignment control:
6204 * - Bits 15:0 is the alignment mask.
6205 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6206 * IEM_MEMMAP_F_ALIGN_SSE, and
6207 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6208 * Pass zero to skip alignment.
6209 */
6210VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6211 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6212{
6213 /*
6214 * Check the input and figure out which mapping entry to use.
6215 */
6216 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6217 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6218 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6219 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6220 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6221
6222 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6223 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6224 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6225 {
6226 iMemMap = iemMemMapFindFree(pVCpu);
6227 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6228 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6229 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6230 pVCpu->iem.s.aMemMappings[2].fAccess),
6231 VERR_IEM_IPE_9);
6232 }
6233
6234 /*
6235 * Map the memory, checking that we can actually access it. If something
6236 * slightly complicated happens, fall back on bounce buffering.
6237 */
6238 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6239 if (rcStrict == VINF_SUCCESS)
6240 { /* likely */ }
6241 else
6242 return rcStrict;
6243
6244 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6245 { /* likely */ }
6246 else
6247 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6248
6249 /*
6250 * Alignment check.
6251 */
6252 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6253 { /* likelyish */ }
6254 else
6255 {
6256 /* Misaligned access. */
6257 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6258 {
6259 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6260 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6261 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6262 {
6263 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6264
6265 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6266 return iemRaiseAlignmentCheckException(pVCpu);
6267 }
6268 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6269 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6270 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6271 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6272 * that's what FXSAVE does on a 10980xe. */
6273 && iemMemAreAlignmentChecksEnabled(pVCpu))
6274 return iemRaiseAlignmentCheckException(pVCpu);
6275 else
6276 return iemRaiseGeneralProtectionFault0(pVCpu);
6277 }
6278 }
6279
6280#ifdef IEM_WITH_DATA_TLB
6281 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6282
6283 /*
6284 * Get the TLB entry for this page.
6285 */
6286 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6287 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6288 if (pTlbe->uTag == uTag)
6289 {
6290# ifdef VBOX_WITH_STATISTICS
6291 pVCpu->iem.s.DataTlb.cTlbHits++;
6292# endif
6293 }
6294 else
6295 {
6296 pVCpu->iem.s.DataTlb.cTlbMisses++;
6297 PGMPTWALK Walk;
6298 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6299 if (RT_FAILURE(rc))
6300 {
6301 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6302# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6303 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6304 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6305# endif
6306 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6307 }
6308
6309 Assert(Walk.fSucceeded);
6310 pTlbe->uTag = uTag;
6311 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6312 pTlbe->GCPhys = Walk.GCPhys;
6313 pTlbe->pbMappingR3 = NULL;
6314 }
6315
6316 /*
6317 * Check TLB page table level access flags.
6318 */
6319 /* If the page is either supervisor only or non-writable, we need to do
6320 more careful access checks. */
6321 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6322 {
6323 /* Write to read only memory? */
6324 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6325 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6326 && ( ( IEM_GET_CPL(pVCpu) == 3
6327 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6328 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6329 {
6330 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6331# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6332 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6333 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6334# endif
6335 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6336 }
6337
6338 /* Kernel memory accessed by userland? */
6339 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6340 && IEM_GET_CPL(pVCpu) == 3
6341 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6342 {
6343 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6344# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6345 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6346 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6347# endif
6348 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6349 }
6350 }
6351
6352 /*
6353 * Set the dirty / access flags.
6354 * ASSUMES this is set when the address is translated rather than on commit...
6355 */
6356 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6357 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6358 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6359 {
6360 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6361 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6362 AssertRC(rc2);
6363 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6364 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6365 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6366 }
6367
6368 /*
6369 * Look up the physical page info if necessary.
6370 */
6371 uint8_t *pbMem = NULL;
6372 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6373# ifdef IN_RING3
6374 pbMem = pTlbe->pbMappingR3;
6375# else
6376 pbMem = NULL;
6377# endif
6378 else
6379 {
6380 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6381 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6382 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6383 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6384 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6385 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6386 { /* likely */ }
6387 else
6388 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6389 pTlbe->pbMappingR3 = NULL;
6390 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6391 | IEMTLBE_F_NO_MAPPINGR3
6392 | IEMTLBE_F_PG_NO_READ
6393 | IEMTLBE_F_PG_NO_WRITE
6394 | IEMTLBE_F_PG_UNASSIGNED
6395 | IEMTLBE_F_PG_CODE_PAGE);
6396 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6397 &pbMem, &pTlbe->fFlagsAndPhysRev);
6398 AssertRCReturn(rc, rc);
6399# ifdef IN_RING3
6400 pTlbe->pbMappingR3 = pbMem;
6401# endif
6402 }
6403
6404 /*
6405 * Check the physical page level access and mapping.
6406 */
6407 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6408 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6409 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6410 { /* probably likely */ }
6411 else
6412 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6413 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6414 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6415 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6416 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6417 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6418
6419 if (pbMem)
6420 {
6421 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6422 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6423 fAccess |= IEM_ACCESS_NOT_LOCKED;
6424 }
6425 else
6426 {
6427 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6428 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6429 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6430 if (rcStrict != VINF_SUCCESS)
6431 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6432 }
6433
6434 void * const pvMem = pbMem;
6435
6436 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6437 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6438 if (fAccess & IEM_ACCESS_TYPE_READ)
6439 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6440
6441#else /* !IEM_WITH_DATA_TLB */
6442
6443 RTGCPHYS GCPhysFirst;
6444 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6445 if (rcStrict != VINF_SUCCESS)
6446 return rcStrict;
6447
6448 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6449 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6450 if (fAccess & IEM_ACCESS_TYPE_READ)
6451 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6452
6453 void *pvMem;
6454 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6455 if (rcStrict != VINF_SUCCESS)
6456 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6457
6458#endif /* !IEM_WITH_DATA_TLB */
6459
6460 /*
6461 * Fill in the mapping table entry.
6462 */
6463 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6464 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6465 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6466 pVCpu->iem.s.cActiveMappings += 1;
6467
6468 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6469 *ppvMem = pvMem;
6470 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6471 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6472 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6473
6474 return VINF_SUCCESS;
6475}
6476
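/*
 * A minimal usage sketch for iemMemMap, modelled on the data fetch helpers
 * further down (e.g. iemMemFetchDataU32_ZX_U64); the wrapper name
 * iemMemPeekDataU32Example is made up for illustration only:
 *
 *      static VBOXSTRICTRC iemMemPeekDataU32Example(PVMCPUCC pVCpu, uint32_t *pu32Dst,
 *                                                   uint8_t iSegReg, RTGCPTR GCPtrMem)
 *      {
 *          uint8_t         bUnmapInfo;
 *          uint32_t const *pu32Src;
 *          // Map 4 bytes for reading; alignment mask 3, so a misaligned access
 *          // only raises #AC when alignment checks are enabled (no #GP flags passed).
 *          VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src),
 *                                            iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *          if (rcStrict == VINF_SUCCESS)
 *          {
 *              *pu32Dst = *pu32Src;                                // use the mapping...
 *              rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); // ...and always pair it with commit or rollback
 *          }
 *          return rcStrict;
 *      }
 */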
6477
6478/**
6479 * Commits the guest memory if bounce buffered and unmaps it.
6480 *
6481 * @returns Strict VBox status code.
6482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6483 * @param bUnmapInfo Unmap info set by iemMemMap.
6484 */
6485VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6486{
6487 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6488 AssertMsgReturn( (bUnmapInfo & 0x08)
6489 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6490 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6491 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6492 VERR_NOT_FOUND);
6493
6494 /* If it's bounce buffered, we may need to write back the buffer. */
6495 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6496 {
6497 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6498 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6499 }
6500 /* Otherwise unlock it. */
6501 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6502 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6503
6504 /* Free the entry. */
6505 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6506 Assert(pVCpu->iem.s.cActiveMappings != 0);
6507 pVCpu->iem.s.cActiveMappings--;
6508 return VINF_SUCCESS;
6509}
6510
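/*
 * A small sketch of the bUnmapInfo layout as produced by iemMemMap and
 * consumed above; this is derived from the code itself rather than from a
 * separate specification:
 *
 *      bits 2:0 - index into aMemMappings / aMemBbMappings / aBounceBuffers
 *      bit  3   - always set for a valid unmap cookie (the 0x08 below)
 *      bits 7:4 - the IEM_ACCESS_TYPE_MASK part of fAccess, shifted up by four
 *
 *      uint8_t   const bUnmapInfo = (uint8_t)(iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4));
 *      uintptr_t const iMemMap2   = bUnmapInfo & 0x7;          // recovers the mapping index
 *      unsigned  const fTypeBits  = (unsigned)bUnmapInfo >> 4; // recovers the access type bits
 */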
6511
6512/**
6513 * Rolls back the guest memory (conceptually only) and unmaps it.
6514 *
6515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6516 * @param bUnmapInfo Unmap info set by iemMemMap.
6517 */
6518void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6519{
6520 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6521 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6522 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6523 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6524 == ((unsigned)bUnmapInfo >> 4),
6525 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6526
6527 /* Unlock it if necessary. */
6528 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6529 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6530
6531 /* Free the entry. */
6532 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6533 Assert(pVCpu->iem.s.cActiveMappings != 0);
6534 pVCpu->iem.s.cActiveMappings--;
6535}
6536
6537#ifdef IEM_WITH_SETJMP
6538
6539/**
6540 * Maps the specified guest memory for the given kind of access, longjmp on
6541 * error.
6542 *
6543 * This may be using bounce buffering of the memory if it's crossing a page
6544 * boundary or if there is an access handler installed for any of it. Because
6545 * of lock prefix guarantees, we're in for some extra clutter when this
6546 * happens.
6547 *
6548 * This may raise a \#GP, \#SS, \#PF or \#AC.
6549 *
6550 * @returns Pointer to the mapped memory.
6551 *
6552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6553 * @param bUnmapInfo Where to return unmap info to be passed to
6554 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6555 * iemMemCommitAndUnmapWoSafeJmp,
6556 * iemMemCommitAndUnmapRoSafeJmp,
6557 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6558 * when done.
6559 * @param cbMem The number of bytes to map. This is usually 1,
6560 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6561 * string operations it can be up to a page.
6562 * @param iSegReg The index of the segment register to use for
6563 * this access. The base and limits are checked.
6564 * Use UINT8_MAX to indicate that no segmentation
6565 * is required (for IDT, GDT and LDT accesses).
6566 * @param GCPtrMem The address of the guest memory.
6567 * @param fAccess How the memory is being accessed. The
6568 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6569 * how to map the memory, while the
6570 * IEM_ACCESS_WHAT_XXX bit is used when raising
6571 * exceptions.
6572 * @param uAlignCtl Alignment control:
6573 * - Bits 15:0 is the alignment mask.
6574 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6575 * IEM_MEMMAP_F_ALIGN_SSE, and
6576 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6577 * Pass zero to skip alignment.
6578 */
6579void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6580 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6581{
6582 /*
6583 * Check the input, check segment access and adjust address
6584 * with segment base.
6585 */
6586 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6587 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6588 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6589
6590 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6591 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6592 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6593
6594 /*
6595 * Alignment check.
6596 */
6597 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6598 { /* likelyish */ }
6599 else
6600 {
6601 /* Misaligned access. */
6602 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6603 {
6604 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6605 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6606 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6607 {
6608 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6609
6610 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6611 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6612 }
6613 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6614 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6615 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6616 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6617 * that's what FXSAVE does on a 10980xe. */
6618 && iemMemAreAlignmentChecksEnabled(pVCpu))
6619 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6620 else
6621 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6622 }
6623 }
6624
6625 /*
6626 * Figure out which mapping entry to use.
6627 */
6628 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6629 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6630 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6631 {
6632 iMemMap = iemMemMapFindFree(pVCpu);
6633 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6634 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6635 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6636 pVCpu->iem.s.aMemMappings[2].fAccess),
6637 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6638 }
6639
6640 /*
6641 * Crossing a page boundary?
6642 */
6643 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6644 { /* No (likely). */ }
6645 else
6646 {
6647 void *pvMem;
6648 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6649 if (rcStrict == VINF_SUCCESS)
6650 return pvMem;
6651 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6652 }
6653
6654#ifdef IEM_WITH_DATA_TLB
6655 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6656
6657 /*
6658 * Get the TLB entry for this page.
6659 */
6660 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6661 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6662 if (pTlbe->uTag == uTag)
6663 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6664 else
6665 {
6666 pVCpu->iem.s.DataTlb.cTlbMisses++;
6667 PGMPTWALK Walk;
6668 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6669 if (RT_FAILURE(rc))
6670 {
6671 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6672# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6673 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6674 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6675# endif
6676 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6677 }
6678
6679 Assert(Walk.fSucceeded);
6680 pTlbe->uTag = uTag;
6681 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6682 pTlbe->GCPhys = Walk.GCPhys;
6683 pTlbe->pbMappingR3 = NULL;
6684 }
6685
6686 /*
6687 * Check the flags and physical revision.
6688 */
6689 /** @todo make the caller pass these in with fAccess. */
6690 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6691 ? IEMTLBE_F_PT_NO_USER : 0;
6692 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6693 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6694 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6695 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6696 ? IEMTLBE_F_PT_NO_WRITE : 0)
6697 : 0;
6698 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6699 uint8_t *pbMem = NULL;
6700 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6701 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6702# ifdef IN_RING3
6703 pbMem = pTlbe->pbMappingR3;
6704# else
6705 pbMem = NULL;
6706# endif
6707 else
6708 {
6709 /*
6710 * Okay, something isn't quite right or needs refreshing.
6711 */
6712 /* Write to read only memory? */
6713 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6714 {
6715 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6716# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6717 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6718 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6719# endif
6720 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6721 }
6722
6723 /* Kernel memory accessed by userland? */
6724 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6725 {
6726 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6727# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6728 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6729 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6730# endif
6731 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6732 }
6733
6734 /* Set the dirty / access flags.
6735 ASSUMES this is set when the address is translated rather than on commit... */
6736 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6737 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6738 {
6739 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6740 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6741 AssertRC(rc2);
6742 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6743 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6744 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6745 }
6746
6747 /*
6748 * Check if the physical page info needs updating.
6749 */
6750 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6751# ifdef IN_RING3
6752 pbMem = pTlbe->pbMappingR3;
6753# else
6754 pbMem = NULL;
6755# endif
6756 else
6757 {
6758 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6759 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6760 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6761 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6762 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6763 pTlbe->pbMappingR3 = NULL;
6764 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6765 | IEMTLBE_F_NO_MAPPINGR3
6766 | IEMTLBE_F_PG_NO_READ
6767 | IEMTLBE_F_PG_NO_WRITE
6768 | IEMTLBE_F_PG_UNASSIGNED
6769 | IEMTLBE_F_PG_CODE_PAGE);
6770 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6771 &pbMem, &pTlbe->fFlagsAndPhysRev);
6772 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6773# ifdef IN_RING3
6774 pTlbe->pbMappingR3 = pbMem;
6775# endif
6776 }
6777
6778 /*
6779 * Check the physical page level access and mapping.
6780 */
6781 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6782 { /* probably likely */ }
6783 else
6784 {
6785 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6786 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6787 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6788 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6789 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6790 if (rcStrict == VINF_SUCCESS)
6791 return pbMem;
6792 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6793 }
6794 }
6795 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6796
6797 if (pbMem)
6798 {
6799 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6800 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6801 fAccess |= IEM_ACCESS_NOT_LOCKED;
6802 }
6803 else
6804 {
6805 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6806 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6807 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6808 if (rcStrict == VINF_SUCCESS)
6809 {
6810 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6811 return pbMem;
6812 }
6813 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6814 }
6815
6816 void * const pvMem = pbMem;
6817
6818 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6819 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6820 if (fAccess & IEM_ACCESS_TYPE_READ)
6821 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6822
6823#else /* !IEM_WITH_DATA_TLB */
6824
6825
6826 RTGCPHYS GCPhysFirst;
6827 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6828 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6829 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6830
6831 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6832 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6833 if (fAccess & IEM_ACCESS_TYPE_READ)
6834 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6835
6836 void *pvMem;
6837 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6838 if (rcStrict == VINF_SUCCESS)
6839 { /* likely */ }
6840 else
6841 {
6842 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6843 if (rcStrict == VINF_SUCCESS)
6844 return pvMem;
6845 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6846 }
6847
6848#endif /* !IEM_WITH_DATA_TLB */
6849
6850 /*
6851 * Fill in the mapping table entry.
6852 */
6853 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6854 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6855 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6856 pVCpu->iem.s.cActiveMappings++;
6857
6858 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6859
6860 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6861 return pvMem;
6862}
6863
6864
6865/**
6866 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6867 *
6868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6869 * @param bUnmapInfo Unmap info set by iemMemMap or iemMemMapJmp; identifies
6870 * the mapping entry and the access type.
6871 */
6872void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6873{
6874 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6875 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6876 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6877 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6878 == ((unsigned)bUnmapInfo >> 4),
6879 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6880
6881 /* If it's bounce buffered, we may need to write back the buffer. */
6882 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6883 {
6884 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6885 {
6886 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6887 if (rcStrict == VINF_SUCCESS)
6888 return;
6889 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6890 }
6891 }
6892 /* Otherwise unlock it. */
6893 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6894 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6895
6896 /* Free the entry. */
6897 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6898 Assert(pVCpu->iem.s.cActiveMappings != 0);
6899 pVCpu->iem.s.cActiveMappings--;
6900}
6901
6902
6903/** Fallback for iemMemCommitAndUnmapRwJmp. */
6904void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6905{
6906 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
6907 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6908}
6909
6910
6911/** Fallback for iemMemCommitAndUnmapWoJmp. */
6912void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6913{
6914 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
6915 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6916}
6917
6918
6919/** Fallback for iemMemCommitAndUnmapRoJmp. */
6920void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6921{
6922 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
6923 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6924}
6925
6926
6927/** Fallback for iemMemRollbackAndUnmapWo. */
6928void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6929{
6930 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
6931 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
6932}
6933
6934#endif /* IEM_WITH_SETJMP */
6935
6936#ifndef IN_RING3
6937/**
6938 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6939 * buffer part runs into trouble, the write is postponed to ring-3 (by setting VMCPU_FF_IEM).
6940 *
6941 * Allows the instruction to be completed and retired, while the IEM user will
6942 * return to ring-3 immediately afterwards and do the postponed writes there.
6943 *
6944 * @returns VBox status code (no strict statuses). Caller must check
6945 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6947 * @param bUnmapInfo Unmap info set by iemMemMap or iemMemMapJmp; identifies
6948 * the mapping entry and the access type.
6949 */
6950VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6951{
6952 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6953 AssertMsgReturn( (bUnmapInfo & 0x08)
6954 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6955 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6956 == ((unsigned)bUnmapInfo >> 4),
6957 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6958 VERR_NOT_FOUND);
6959
6960 /* If it's bounce buffered, we may need to write back the buffer. */
6961 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6962 {
6963 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6964 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6965 }
6966 /* Otherwise unlock it. */
6967 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6968 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6969
6970 /* Free the entry. */
6971 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6972 Assert(pVCpu->iem.s.cActiveMappings != 0);
6973 pVCpu->iem.s.cActiveMappings--;
6974 return VINF_SUCCESS;
6975}
6976#endif
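/*
 * A hedged sketch of how a non-ring-3 caller of the postponing variant above
 * is expected to behave; the control flow is illustrative, only VMCPU_FF_IEM
 * and the VMCPU_FF_IS_SET check are assumed from the regular VMCPU force-flag API:
 *
 *      VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
 *      if (rcStrict == VINF_SUCCESS && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *      {
 *          // Part of the write was postponed: the instruction may retire, but
 *          // string instructions must not be repeated here; return to ring-3
 *          // so the pending bounce buffer writes can be flushed there first.
 *      }
 */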
6977
6978
6979/**
6980 * Rolls back mappings, releasing page locks and such.
6981 *
6982 * The caller shall only call this after checking cActiveMappings.
6983 *
6984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6985 */
6986void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6987{
6988 Assert(pVCpu->iem.s.cActiveMappings > 0);
6989
6990 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6991 while (iMemMap-- > 0)
6992 {
6993 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6994 if (fAccess != IEM_ACCESS_INVALID)
6995 {
6996 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6997 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6998 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6999 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7000 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7001 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7002 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7003 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7004 pVCpu->iem.s.cActiveMappings--;
7005 }
7006 }
7007}
7008
7009
7010/*
7011 * Instantiate R/W templates.
7012 */
7013#define TMPL_MEM_WITH_STACK
7014
7015#define TMPL_MEM_TYPE uint8_t
7016#define TMPL_MEM_FN_SUFF U8
7017#define TMPL_MEM_FMT_TYPE "%#04x"
7018#define TMPL_MEM_FMT_DESC "byte"
7019#include "IEMAllMemRWTmpl.cpp.h"
7020
7021#define TMPL_MEM_TYPE uint16_t
7022#define TMPL_MEM_FN_SUFF U16
7023#define TMPL_MEM_FMT_TYPE "%#06x"
7024#define TMPL_MEM_FMT_DESC "word"
7025#include "IEMAllMemRWTmpl.cpp.h"
7026
7027#define TMPL_WITH_PUSH_SREG
7028#define TMPL_MEM_TYPE uint32_t
7029#define TMPL_MEM_FN_SUFF U32
7030#define TMPL_MEM_FMT_TYPE "%#010x"
7031#define TMPL_MEM_FMT_DESC "dword"
7032#include "IEMAllMemRWTmpl.cpp.h"
7033#undef TMPL_WITH_PUSH_SREG
7034
7035#define TMPL_MEM_TYPE uint64_t
7036#define TMPL_MEM_FN_SUFF U64
7037#define TMPL_MEM_FMT_TYPE "%#018RX64"
7038#define TMPL_MEM_FMT_DESC "qword"
7039#include "IEMAllMemRWTmpl.cpp.h"
7040
7041#undef TMPL_MEM_WITH_STACK
7042
7043#define TMPL_MEM_TYPE uint64_t
7044#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7045#define TMPL_MEM_FN_SUFF U64AlignedU128
7046#define TMPL_MEM_FMT_TYPE "%#018RX64"
7047#define TMPL_MEM_FMT_DESC "qword"
7048#include "IEMAllMemRWTmpl.cpp.h"
7049
7050/* See IEMAllMemRWTmplInline.cpp.h */
7051#define TMPL_MEM_BY_REF
7052
7053#define TMPL_MEM_TYPE RTFLOAT80U
7054#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7055#define TMPL_MEM_FN_SUFF R80
7056#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7057#define TMPL_MEM_FMT_DESC "tword"
7058#include "IEMAllMemRWTmpl.cpp.h"
7059
7060#define TMPL_MEM_TYPE RTPBCD80U
7061#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7062#define TMPL_MEM_FN_SUFF D80
7063#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7064#define TMPL_MEM_FMT_DESC "tword"
7065#include "IEMAllMemRWTmpl.cpp.h"
7066
7067#define TMPL_MEM_TYPE RTUINT128U
7068#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7069#define TMPL_MEM_FN_SUFF U128
7070#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7071#define TMPL_MEM_FMT_DESC "dqword"
7072#include "IEMAllMemRWTmpl.cpp.h"
7073
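/*
 * Each TMPL_MEM_TYPE / TMPL_MEM_FN_SUFF block above includes
 * IEMAllMemRWTmpl.cpp.h to stamp out the fetch/store helpers for that width;
 * the U16/U32/U64 instantiations are, for instance, what provide the
 * iemMemFetchDataU16/U32/U64 functions used by iemMemFetchDataXdtr below.
 * A sketch of adding another width, assuming the template copes with the
 * (made up) suffix:
 *
 *      #define TMPL_MEM_TYPE       uint32_t
 *      #define TMPL_MEM_FN_SUFF    U32Example         // hypothetical suffix
 *      #define TMPL_MEM_FMT_TYPE   "%#010x"
 *      #define TMPL_MEM_FMT_DESC   "dword"
 *      #include "IEMAllMemRWTmpl.cpp.h"
 */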
7074
7075/**
7076 * Fetches a data dword and zero extends it to a qword.
7077 *
7078 * @returns Strict VBox status code.
7079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7080 * @param pu64Dst Where to return the qword.
7081 * @param iSegReg The index of the segment register to use for
7082 * this access. The base and limits are checked.
7083 * @param GCPtrMem The address of the guest memory.
7084 */
7085VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7086{
7087 /* The lazy approach for now... */
7088 uint8_t bUnmapInfo;
7089 uint32_t const *pu32Src;
7090 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7091 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7092 if (rc == VINF_SUCCESS)
7093 {
7094 *pu64Dst = *pu32Src;
7095 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7096 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7097 }
7098 return rc;
7099}
7100
7101
7102#ifdef SOME_UNUSED_FUNCTION
7103/**
7104 * Fetches a data dword and sign extends it to a qword.
7105 *
7106 * @returns Strict VBox status code.
7107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7108 * @param pu64Dst Where to return the sign extended value.
7109 * @param iSegReg The index of the segment register to use for
7110 * this access. The base and limits are checked.
7111 * @param GCPtrMem The address of the guest memory.
7112 */
7113VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7114{
7115 /* The lazy approach for now... */
7116 uint8_t bUnmapInfo;
7117 int32_t const *pi32Src;
7118 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7119 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7120 if (rc == VINF_SUCCESS)
7121 {
7122 *pu64Dst = *pi32Src;
7123 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7124 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7125 }
7126#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7127 else
7128 *pu64Dst = 0;
7129#endif
7130 return rc;
7131}
7132#endif
7133
7134
7135/**
7136 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7137 * related.
7138 *
7139 * Raises \#GP(0) if not aligned.
7140 *
7141 * @returns Strict VBox status code.
7142 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7143 * @param pu128Dst Where to return the dqword.
7144 * @param iSegReg The index of the segment register to use for
7145 * this access. The base and limits are checked.
7146 * @param GCPtrMem The address of the guest memory.
7147 */
7148VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7149{
7150 /* The lazy approach for now... */
7151 uint8_t bUnmapInfo;
7152 PCRTUINT128U pu128Src;
7153 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
7154 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7155 if (rc == VINF_SUCCESS)
7156 {
7157 pu128Dst->au64[0] = pu128Src->au64[0];
7158 pu128Dst->au64[1] = pu128Src->au64[1];
7159 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7160 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7161 }
7162 return rc;
7163}
7164
7165
7166#ifdef IEM_WITH_SETJMP
7167/**
7168 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7169 * related, longjmp on error.
7170 *
7171 * Raises \#GP(0) if not aligned.
7172 *
7173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7174 * @param pu128Dst Where to return the dqword.
7175 * @param iSegReg The index of the segment register to use for
7176 * this access. The base and limits are checked.
7177 * @param GCPtrMem The address of the guest memory.
7178 */
7179void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7180 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7181{
7182 /* The lazy approach for now... */
7183 uint8_t bUnmapInfo;
7184 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7185 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7186 pu128Dst->au64[0] = pu128Src->au64[0];
7187 pu128Dst->au64[1] = pu128Src->au64[1];
7188 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7189 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7190}
7191#endif
7192
7193
7194/**
7195 * Fetches a data oword (octo word), generally AVX related.
7196 *
7197 * @returns Strict VBox status code.
7198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7199 * @param pu256Dst Where to return the oword.
7200 * @param iSegReg The index of the segment register to use for
7201 * this access. The base and limits are checked.
7202 * @param GCPtrMem The address of the guest memory.
7203 */
7204VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7205{
7206 /* The lazy approach for now... */
7207 uint8_t bUnmapInfo;
7208 PCRTUINT256U pu256Src;
7209 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7210 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7211 if (rc == VINF_SUCCESS)
7212 {
7213 pu256Dst->au64[0] = pu256Src->au64[0];
7214 pu256Dst->au64[1] = pu256Src->au64[1];
7215 pu256Dst->au64[2] = pu256Src->au64[2];
7216 pu256Dst->au64[3] = pu256Src->au64[3];
7217 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7218 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7219 }
7220 return rc;
7221}
7222
7223
7224#ifdef IEM_WITH_SETJMP
7225/**
7226 * Fetches a data oword (octo word), generally AVX related.
7227 *
7228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7229 * @param pu256Dst Where to return the oword.
7230 * @param iSegReg The index of the segment register to use for
7231 * this access. The base and limits are checked.
7232 * @param GCPtrMem The address of the guest memory.
7233 */
7234void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7235{
7236 /* The lazy approach for now... */
7237 uint8_t bUnmapInfo;
7238 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7239 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7240 pu256Dst->au64[0] = pu256Src->au64[0];
7241 pu256Dst->au64[1] = pu256Src->au64[1];
7242 pu256Dst->au64[2] = pu256Src->au64[2];
7243 pu256Dst->au64[3] = pu256Src->au64[3];
7244 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7245 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7246}
7247#endif
7248
7249
7250/**
7251 * Fetches a data oword (octo word) at an aligned address, generally AVX
7252 * related.
7253 *
7254 * Raises \#GP(0) if not aligned.
7255 *
7256 * @returns Strict VBox status code.
7257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7258 * @param pu256Dst Where to return the oword.
7259 * @param iSegReg The index of the segment register to use for
7260 * this access. The base and limits are checked.
7261 * @param GCPtrMem The address of the guest memory.
7262 */
7263VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7264{
7265 /* The lazy approach for now... */
7266 uint8_t bUnmapInfo;
7267 PCRTUINT256U pu256Src;
7268 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7269 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7270 if (rc == VINF_SUCCESS)
7271 {
7272 pu256Dst->au64[0] = pu256Src->au64[0];
7273 pu256Dst->au64[1] = pu256Src->au64[1];
7274 pu256Dst->au64[2] = pu256Src->au64[2];
7275 pu256Dst->au64[3] = pu256Src->au64[3];
7276 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7277 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7278 }
7279 return rc;
7280}
7281
7282
7283#ifdef IEM_WITH_SETJMP
7284/**
7285 * Fetches a data oword (octo word) at an aligned address, generally AVX
7286 * related, longjmp on error.
7287 *
7288 * Raises \#GP(0) if not aligned.
7289 *
7290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7291 * @param pu256Dst Where to return the oword.
7292 * @param iSegReg The index of the segment register to use for
7293 * this access. The base and limits are checked.
7294 * @param GCPtrMem The address of the guest memory.
7295 */
7296void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7297 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7298{
7299 /* The lazy approach for now... */
7300 uint8_t bUnmapInfo;
7301 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7302 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7303 pu256Dst->au64[0] = pu256Src->au64[0];
7304 pu256Dst->au64[1] = pu256Src->au64[1];
7305 pu256Dst->au64[2] = pu256Src->au64[2];
7306 pu256Dst->au64[3] = pu256Src->au64[3];
7307 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7308 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7309}
7310#endif
7311
7312
7313
7314/**
7315 * Fetches a descriptor register (lgdt, lidt).
7316 *
7317 * @returns Strict VBox status code.
7318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7319 * @param pcbLimit Where to return the limit.
7320 * @param pGCPtrBase Where to return the base.
7321 * @param iSegReg The index of the segment register to use for
7322 * this access. The base and limits are checked.
7323 * @param GCPtrMem The address of the guest memory.
7324 * @param enmOpSize The effective operand size.
7325 */
7326VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7327 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7328{
7329 /*
7330 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7331 * little special:
7332 * - The two reads are done separately.
7333 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7334 * - We suspect the 386 to actually commit the limit before the base in
7335 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7336 * don't try to emulate this eccentric behavior, because it's not well
7337 * enough understood and rather hard to trigger.
7338 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7339 */
7340 VBOXSTRICTRC rcStrict;
7341 if (IEM_IS_64BIT_CODE(pVCpu))
7342 {
7343 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7344 if (rcStrict == VINF_SUCCESS)
7345 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7346 }
7347 else
7348 {
7349 uint32_t uTmp = 0; /* (Silences Visual C++'s maybe-used-uninitialized warning.) */
7350 if (enmOpSize == IEMMODE_32BIT)
7351 {
7352 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7353 {
7354 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7355 if (rcStrict == VINF_SUCCESS)
7356 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7357 }
7358 else
7359 {
7360 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7361 if (rcStrict == VINF_SUCCESS)
7362 {
7363 *pcbLimit = (uint16_t)uTmp;
7364 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7365 }
7366 }
7367 if (rcStrict == VINF_SUCCESS)
7368 *pGCPtrBase = uTmp;
7369 }
7370 else
7371 {
7372 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7373 if (rcStrict == VINF_SUCCESS)
7374 {
7375 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7376 if (rcStrict == VINF_SUCCESS)
7377 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7378 }
7379 }
7380 }
7381 return rcStrict;
7382}
7383
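/*
 * Worked example, illustrative values only: LGDT with a 16-bit operand size
 * reading the bytes 0xFF 0x03 0x78 0x56 0x34 0x12 yields cbLimit=0x03FF and a
 * base of 0x12345678 & 0x00ffffff = 0x00345678, since only 24 base bits are
 * defined for this form; with a 32-bit operand size the full 0x12345678 base
 * is loaded.
 */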
7384
7385/**
7386 * Stores a data dqword, SSE aligned.
7387 *
7388 * @returns Strict VBox status code.
7389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7390 * @param iSegReg The index of the segment register to use for
7391 * this access. The base and limits are checked.
7392 * @param GCPtrMem The address of the guest memory.
7393 * @param u128Value The value to store.
7394 */
7395VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7396{
7397 /* The lazy approach for now... */
7398 uint8_t bUnmapInfo;
7399 PRTUINT128U pu128Dst;
7400 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7401 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7402 if (rc == VINF_SUCCESS)
7403 {
7404 pu128Dst->au64[0] = u128Value.au64[0];
7405 pu128Dst->au64[1] = u128Value.au64[1];
7406 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7407 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7408 }
7409 return rc;
7410}
7411
7412
7413#ifdef IEM_WITH_SETJMP
7414/**
7415 * Stores a data dqword, SSE aligned, longjmp on error.
7416 *
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7419 * @param iSegReg The index of the segment register to use for
7420 * this access. The base and limits are checked.
7421 * @param GCPtrMem The address of the guest memory.
7422 * @param u128Value The value to store.
7423 */
7424void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7425 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7426{
7427 /* The lazy approach for now... */
7428 uint8_t bUnmapInfo;
7429 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7430 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7431 pu128Dst->au64[0] = u128Value.au64[0];
7432 pu128Dst->au64[1] = u128Value.au64[1];
7433 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7434 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7435}
7436#endif
7437
7438
7439/**
7440 * Stores a data qqword (256 bits).
7441 *
7442 * @returns Strict VBox status code.
7443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7444 * @param iSegReg The index of the segment register to use for
7445 * this access. The base and limits are checked.
7446 * @param GCPtrMem The address of the guest memory.
7447 * @param pu256Value Pointer to the value to store.
7448 */
7449VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7450{
7451 /* The lazy approach for now... */
7452 uint8_t bUnmapInfo;
7453 PRTUINT256U pu256Dst;
7454 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7455 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7456 if (rc == VINF_SUCCESS)
7457 {
7458 pu256Dst->au64[0] = pu256Value->au64[0];
7459 pu256Dst->au64[1] = pu256Value->au64[1];
7460 pu256Dst->au64[2] = pu256Value->au64[2];
7461 pu256Dst->au64[3] = pu256Value->au64[3];
7462 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7463 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7464 }
7465 return rc;
7466}
7467
7468
7469#ifdef IEM_WITH_SETJMP
7470/**
7471 * Stores a data qqword (256 bits), longjmp on error.
7472 *
7473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7474 * @param iSegReg The index of the segment register to use for
7475 * this access. The base and limits are checked.
7476 * @param GCPtrMem The address of the guest memory.
7477 * @param pu256Value Pointer to the value to store.
7478 */
7479void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7480{
7481 /* The lazy approach for now... */
7482 uint8_t bUnmapInfo;
7483 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7484 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7485 pu256Dst->au64[0] = pu256Value->au64[0];
7486 pu256Dst->au64[1] = pu256Value->au64[1];
7487 pu256Dst->au64[2] = pu256Value->au64[2];
7488 pu256Dst->au64[3] = pu256Value->au64[3];
7489 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7490 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7491}
7492#endif
7493
7494
7495/**
7496 * Stores a data qqword (256 bits), AVX \#GP(0) aligned.
7497 *
7498 * @returns Strict VBox status code.
7499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7500 * @param iSegReg The index of the segment register to use for
7501 * this access. The base and limits are checked.
7502 * @param GCPtrMem The address of the guest memory.
7503 * @param pu256Value Pointer to the value to store.
7504 */
7505VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7506{
7507 /* The lazy approach for now... */
7508 uint8_t bUnmapInfo;
7509 PRTUINT256U pu256Dst;
7510 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7511 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7512 if (rc == VINF_SUCCESS)
7513 {
7514 pu256Dst->au64[0] = pu256Value->au64[0];
7515 pu256Dst->au64[1] = pu256Value->au64[1];
7516 pu256Dst->au64[2] = pu256Value->au64[2];
7517 pu256Dst->au64[3] = pu256Value->au64[3];
7518 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7519 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7520 }
7521 return rc;
7522}
7523
7524
7525#ifdef IEM_WITH_SETJMP
7526/**
7527 * Stores a data qqword (256 bits), AVX aligned, longjmp on error.
7528 *
7530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7531 * @param iSegReg The index of the segment register to use for
7532 * this access. The base and limits are checked.
7533 * @param GCPtrMem The address of the guest memory.
7534 * @param pu256Value Pointer to the value to store.
7535 */
7536void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7537 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7538{
7539 /* The lazy approach for now... */
7540 uint8_t bUnmapInfo;
7541 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7542 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7543 pu256Dst->au64[0] = pu256Value->au64[0];
7544 pu256Dst->au64[1] = pu256Value->au64[1];
7545 pu256Dst->au64[2] = pu256Value->au64[2];
7546 pu256Dst->au64[3] = pu256Value->au64[3];
7547 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7548 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7549}
7550#endif
7551
7552
7553/**
7554 * Stores a descriptor register (sgdt, sidt).
7555 *
7556 * @returns Strict VBox status code.
7557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7558 * @param cbLimit The limit.
7559 * @param GCPtrBase The base address.
7560 * @param iSegReg The index of the segment register to use for
7561 * this access. The base and limits are checked.
7562 * @param GCPtrMem The address of the guest memory.
7563 */
7564VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7565{
7566 /*
7567 * The SIDT and SGDT instructions actually store the data using two
7568 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7569 * do not respond to opsize prefixes.
7570 */
7571 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7572 if (rcStrict == VINF_SUCCESS)
7573 {
7574 if (IEM_IS_16BIT_CODE(pVCpu))
7575 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7576 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7577 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7578 else if (IEM_IS_32BIT_CODE(pVCpu))
7579 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7580 else
7581 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7582 }
7583 return rcStrict;
7584}
7585
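/*
 * Worked example, illustrative values only: SGDT executed in 16-bit code
 * stores the 16-bit limit followed by a 32-bit base.  On a 286-or-older
 * target CPU the top base byte is forced to 0xFF (a base of 0x00345678 is
 * stored as 0xFF345678); on later CPUs the base is stored as-is, and 64-bit
 * code stores all 64 base bits.
 */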
7586
7587/**
7588 * Begin a special stack push (used by interrupt, exceptions and such).
7589 *
7590 * This will raise \#SS or \#PF if appropriate.
7591 *
7592 * @returns Strict VBox status code.
7593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7594 * @param cbMem The number of bytes to push onto the stack.
7595 * @param cbAlign The alignment mask (7, 3, 1).
7596 * @param ppvMem Where to return the pointer to the stack memory.
7597 * As with the other memory functions this could be
7598 * direct access or bounce buffered access, so
7599 * don't commit any registers until the commit call
7600 * succeeds.
7601 * @param pbUnmapInfo Where to store unmap info for
7602 * iemMemStackPushCommitSpecial.
7603 * @param puNewRsp Where to return the new RSP value. This must be
7604 * passed unchanged to
7605 * iemMemStackPushCommitSpecial().
7606 */
7607VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7608 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7609{
7610 Assert(cbMem < UINT8_MAX);
7611 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7612 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7613}
7614
7615
7616/**
7617 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7618 *
7619 * This will update the rSP.
7620 *
7621 * @returns Strict VBox status code.
7622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7623 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7624 * @param uNewRsp The new RSP value returned by
7625 * iemMemStackPushBeginSpecial().
7626 */
7627VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7628{
7629 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7630 if (rcStrict == VINF_SUCCESS)
7631 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7632 return rcStrict;
7633}
7634
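/*
 * Usage sketch, illustrative only: the begin/fill/commit sequence expected by
 * the special stack push helpers above.  The function name, the 8-byte frame
 * and the values pushed here are made-up examples, not real IEM code.
 */
#if 0
static VBOXSTRICTRC iemExampleSpecialPush(PVMCPUCC pVCpu, uint32_t uEip, uint32_t uErrCd)
{
    void    *pvFrame;
    uint8_t  bUnmapInfo;
    uint64_t uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8 /*cbMem*/, 3 /*cbAlign*/,
                                                        &pvFrame, &bUnmapInfo, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict; /* #SS or #PF has already been raised. */

    /* Fill in the frame; pvFrame may be direct or bounce buffered memory. */
    ((uint32_t *)pvFrame)[0] = uEip;
    ((uint32_t *)pvFrame)[1] = uErrCd;

    /* Commit the write; RSP is only updated by the commit helper on success. */
    return iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
}
#endif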
7635
7636/**
7637 * Begin a special stack pop (used by iret, retf and such).
7638 *
7639 * This will raise \#SS or \#PF if appropriate.
7640 *
7641 * @returns Strict VBox status code.
7642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7643 * @param cbMem The number of bytes to pop from the stack.
7644 * @param cbAlign The alignment mask (7, 3, 1).
7645 * @param ppvMem Where to return the pointer to the stack memory.
7646 * @param pbUnmapInfo Where to store unmap info for
7647 * iemMemStackPopDoneSpecial.
7648 * @param puNewRsp Where to return the new RSP value. This must be
7649 * assigned to CPUMCTX::rsp manually some time
7650 * after iemMemStackPopDoneSpecial() has been
7651 * called.
7652 */
7653VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7654 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7655{
7656 Assert(cbMem < UINT8_MAX);
7657 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7658 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7659}
7660
7661
7662/**
7663 * Continue a special stack pop (used by iret and retf), for the purpose of
7664 * retrieving a new stack pointer.
7665 *
7666 * This will raise \#SS or \#PF if appropriate.
7667 *
7668 * @returns Strict VBox status code.
7669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7670 * @param off Offset from the top of the stack. This is zero
7671 * except in the retf case.
7672 * @param cbMem The number of bytes to pop from the stack.
7673 * @param ppvMem Where to return the pointer to the stack memory.
7674 * @param pbUnmapInfo Where to store unmap info for
7675 * iemMemStackPopDoneSpecial.
7676 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7677 * return this because all use of this function is
7678 * to retrieve a new value and anything we return
7679 * here would be discarded.)
7680 */
7681VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7682 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7683{
7684 Assert(cbMem < UINT8_MAX);
7685
7686 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7687 RTGCPTR GCPtrTop;
7688 if (IEM_IS_64BIT_CODE(pVCpu))
7689 GCPtrTop = uCurNewRsp;
7690 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7691 GCPtrTop = (uint32_t)uCurNewRsp;
7692 else
7693 GCPtrTop = (uint16_t)uCurNewRsp;
7694
7695 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7696 0 /* checked in iemMemStackPopBeginSpecial */);
7697}
7698
7699
7700/**
7701 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7702 * iemMemStackPopContinueSpecial).
7703 *
7704 * The caller will manually commit the rSP.
7705 *
7706 * @returns Strict VBox status code.
7707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7708 * @param bUnmapInfo Unmap information returned by
7709 * iemMemStackPopBeginSpecial() or
7710 * iemMemStackPopContinueSpecial().
7711 */
7712VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7713{
7714 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7715}
7716
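/*
 * Usage sketch, illustrative only: the begin/read/done sequence for a special
 * stack pop.  The function name and the 32-bit value popped here are made-up
 * examples; note that RSP is committed manually by the caller afterwards.
 */
#if 0
static VBOXSTRICTRC iemExampleSpecialPop(PVMCPUCC pVCpu, uint32_t *puValue)
{
    void const *pvFrame;
    uint8_t     bUnmapInfo;
    uint64_t    uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 4 /*cbMem*/, 3 /*cbAlign*/,
                                                       &pvFrame, &bUnmapInfo, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    *puValue = *(uint32_t const *)pvFrame;

    rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp; /* Commit RSP only once everything else has succeeded. */
    return rcStrict;
}
#endif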
7717
7718/**
7719 * Fetches a system table byte.
7720 *
7721 * @returns Strict VBox status code.
7722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7723 * @param pbDst Where to return the byte.
7724 * @param iSegReg The index of the segment register to use for
7725 * this access. The base and limits are checked.
7726 * @param GCPtrMem The address of the guest memory.
7727 */
7728VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7729{
7730 /* The lazy approach for now... */
7731 uint8_t bUnmapInfo;
7732 uint8_t const *pbSrc;
7733 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7734 if (rc == VINF_SUCCESS)
7735 {
7736 *pbDst = *pbSrc;
7737 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7738 }
7739 return rc;
7740}
7741
7742
7743/**
7744 * Fetches a system table word.
7745 *
7746 * @returns Strict VBox status code.
7747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7748 * @param pu16Dst Where to return the word.
7749 * @param iSegReg The index of the segment register to use for
7750 * this access. The base and limits are checked.
7751 * @param GCPtrMem The address of the guest memory.
7752 */
7753VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7754{
7755 /* The lazy approach for now... */
7756 uint8_t bUnmapInfo;
7757 uint16_t const *pu16Src;
7758 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7759 if (rc == VINF_SUCCESS)
7760 {
7761 *pu16Dst = *pu16Src;
7762 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7763 }
7764 return rc;
7765}
7766
7767
7768/**
7769 * Fetches a system table dword.
7770 *
7771 * @returns Strict VBox status code.
7772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7773 * @param pu32Dst Where to return the dword.
7774 * @param iSegReg The index of the segment register to use for
7775 * this access. The base and limits are checked.
7776 * @param GCPtrMem The address of the guest memory.
7777 */
7778VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7779{
7780 /* The lazy approach for now... */
7781 uint8_t bUnmapInfo;
7782 uint32_t const *pu32Src;
7783 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7784 if (rc == VINF_SUCCESS)
7785 {
7786 *pu32Dst = *pu32Src;
7787 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7788 }
7789 return rc;
7790}
7791
7792
7793/**
7794 * Fetches a system table qword.
7795 *
7796 * @returns Strict VBox status code.
7797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7798 * @param pu64Dst Where to return the qword.
7799 * @param iSegReg The index of the segment register to use for
7800 * this access. The base and limits are checked.
7801 * @param GCPtrMem The address of the guest memory.
7802 */
7803VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7804{
7805 /* The lazy approach for now... */
7806 uint8_t bUnmapInfo;
7807 uint64_t const *pu64Src;
7808 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7809 if (rc == VINF_SUCCESS)
7810 {
7811 *pu64Dst = *pu64Src;
7812 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7813 }
7814 return rc;
7815}
7816
7817
7818/**
7819 * Fetches a descriptor table entry with caller specified error code.
7820 *
7821 * @returns Strict VBox status code.
7822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7823 * @param pDesc Where to return the descriptor table entry.
7824 * @param uSel The selector which table entry to fetch.
7825 * @param uXcpt The exception to raise on table lookup error.
7826 * @param uErrorCode The error code associated with the exception.
7827 */
7828static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7829 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7830{
7831 AssertPtr(pDesc);
7832 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7833
7834 /** @todo did the 286 require all 8 bytes to be accessible? */
7835 /*
7836 * Get the selector table base and check bounds.
7837 */
7838 RTGCPTR GCPtrBase;
7839 if (uSel & X86_SEL_LDT)
7840 {
7841 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
7842 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
7843 {
7844 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
7845 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
7846 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7847 uErrorCode, 0);
7848 }
7849
7850 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
7851 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
7852 }
7853 else
7854 {
7855 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
7856 {
7857 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
7858 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7859 uErrorCode, 0);
7860 }
7861 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
7862 }
7863
7864 /*
7865 * Read the legacy descriptor and maybe the long mode extensions if
7866 * required.
7867 */
7868 VBOXSTRICTRC rcStrict;
7869 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
7870 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
7871 else
7872 {
7873 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
7874 if (rcStrict == VINF_SUCCESS)
7875 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
7876 if (rcStrict == VINF_SUCCESS)
7877 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
7878 if (rcStrict == VINF_SUCCESS)
7879 pDesc->Legacy.au16[3] = 0;
7880 else
7881 return rcStrict;
7882 }
7883
7884 if (rcStrict == VINF_SUCCESS)
7885 {
7886 if ( !IEM_IS_LONG_MODE(pVCpu)
7887 || pDesc->Legacy.Gen.u1DescType)
7888 pDesc->Long.au64[1] = 0;
7889 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
7890 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
7891 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
7892 else
7893 {
7894 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
7895 /** @todo is this the right exception? */
7896 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
7897 }
7898 }
7899 return rcStrict;
7900}
7901
7902
7903/**
7904 * Fetches a descriptor table entry.
7905 *
7906 * @returns Strict VBox status code.
7907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7908 * @param pDesc Where to return the descriptor table entry.
7909 * @param uSel The selector which table entry to fetch.
7910 * @param uXcpt The exception to raise on table lookup error.
7911 */
7912VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
7913{
7914 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
7915}
7916
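/*
 * Worked example, illustrative only: selector 0x0023 has RPL=3 and TI=0, so
 * its descriptor is read from the GDT at gdtr.pGdt + (0x0023 & X86_SEL_MASK)
 * = gdtr.pGdt + 0x20, i.e. GDT entry 4.  A selector with the TI bit set
 * (e.g. 0x002c) is looked up in the LDT via ldtr.u64Base instead.
 */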
7917
7918/**
7919 * Marks the selector descriptor as accessed (only non-system descriptors).
7920 *
7921 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
7922 * will therefore skip the limit checks.
7923 *
7924 * @returns Strict VBox status code.
7925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7926 * @param uSel The selector.
7927 */
7928VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
7929{
7930 /*
7931 * Get the selector table base and calculate the entry address.
7932 */
7933 RTGCPTR GCPtr = uSel & X86_SEL_LDT
7934 ? pVCpu->cpum.GstCtx.ldtr.u64Base
7935 : pVCpu->cpum.GstCtx.gdtr.pGdt;
7936 GCPtr += uSel & X86_SEL_MASK;
7937
7938 /*
7939 * ASMAtomicBitSet will assert if the address is misaligned, so do some
7940 * ugly stuff to avoid this. This will make sure it's an atomic access
7941 * as well as more or less remove any question about 8-bit or 32-bit accesses.
7942 */
7943 VBOXSTRICTRC rcStrict;
7944 uint8_t bUnmapInfo;
7945 uint32_t volatile *pu32;
7946 if ((GCPtr & 3) == 0)
7947 {
7948 /* The normal case, map the 32-bit bits around the accessed bit (40). */
7949 GCPtr += 2 + 2;
7950 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7951 if (rcStrict != VINF_SUCCESS)
7952 return rcStrict;
7953 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
7954 }
7955 else
7956 {
7957 /* The misaligned GDT/LDT case, map the whole thing. */
7958 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7959 if (rcStrict != VINF_SUCCESS)
7960 return rcStrict;
7961 switch ((uintptr_t)pu32 & 3)
7962 {
7963 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
7964 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
7965 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
7966 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
7967 }
7968 }
7969
7970 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7971}
7972
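/*
 * Worked example, illustrative only: the accessed flag is bit 40 of the
 * 8-byte descriptor.  For a 4-byte aligned entry the code above maps the high
 * dword at offset 4 and sets bit 40 - 32 = 8 in it; for a misaligned entry
 * the whole 8 bytes are mapped and the bit position is adjusted by the
 * returned pointer's misalignment so the atomic set still targets bit 40.
 */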
7973
7974#undef LOG_GROUP
7975#define LOG_GROUP LOG_GROUP_IEM
7976
7977/** @} */
7978
7979/** @name Opcode Helpers.
7980 * @{
7981 */
7982
7983/**
7984 * Calculates the effective address of a ModR/M memory operand.
7985 *
7986 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7987 *
7988 * @return Strict VBox status code.
7989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7990 * @param bRm The ModRM byte.
7991 * @param cbImmAndRspOffset - First byte: The size of any immediate
7992 * following the effective address opcode bytes
7993 * (only for RIP relative addressing).
7994 * - Second byte: RSP displacement (for POP [ESP]).
7995 * @param pGCPtrEff Where to return the effective address.
7996 */
7997VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
7998{
7999 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8000# define SET_SS_DEF() \
8001 do \
8002 { \
8003 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8004 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8005 } while (0)
8006
8007 if (!IEM_IS_64BIT_CODE(pVCpu))
8008 {
8009/** @todo Check the effective address size crap! */
8010 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8011 {
8012 uint16_t u16EffAddr;
8013
8014 /* Handle the disp16 form with no registers first. */
8015 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8016 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8017 else
8018 {
8019 /* Get the displacement. */
8020 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8021 {
8022 case 0: u16EffAddr = 0; break;
8023 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8024 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8025 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8026 }
8027
8028 /* Add the base and index registers to the disp. */
8029 switch (bRm & X86_MODRM_RM_MASK)
8030 {
8031 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8032 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8033 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8034 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8035 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8036 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8037 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8038 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8039 }
8040 }
8041
8042 *pGCPtrEff = u16EffAddr;
8043 }
8044 else
8045 {
8046 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8047 uint32_t u32EffAddr;
8048
8049 /* Handle the disp32 form with no registers first. */
8050 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8051 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8052 else
8053 {
8054 /* Get the register (or SIB) value. */
8055 switch ((bRm & X86_MODRM_RM_MASK))
8056 {
8057 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8058 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8059 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8060 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8061 case 4: /* SIB */
8062 {
8063 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8064
8065 /* Get the index and scale it. */
8066 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8067 {
8068 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8069 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8070 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8071 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8072 case 4: u32EffAddr = 0; /*none */ break;
8073 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8074 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8075 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8076 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8077 }
8078 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8079
8080 /* add base */
8081 switch (bSib & X86_SIB_BASE_MASK)
8082 {
8083 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8084 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8085 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8086 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8087 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8088 case 5:
8089 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8090 {
8091 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8092 SET_SS_DEF();
8093 }
8094 else
8095 {
8096 uint32_t u32Disp;
8097 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8098 u32EffAddr += u32Disp;
8099 }
8100 break;
8101 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8102 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8103 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8104 }
8105 break;
8106 }
8107 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8108 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8109 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8110 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8111 }
8112
8113 /* Get and add the displacement. */
8114 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8115 {
8116 case 0:
8117 break;
8118 case 1:
8119 {
8120 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8121 u32EffAddr += i8Disp;
8122 break;
8123 }
8124 case 2:
8125 {
8126 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8127 u32EffAddr += u32Disp;
8128 break;
8129 }
8130 default:
8131 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8132 }
8133
8134 }
8135 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8136 *pGCPtrEff = u32EffAddr;
8137 }
8138 }
8139 else
8140 {
8141 uint64_t u64EffAddr;
8142
8143 /* Handle the rip+disp32 form with no registers first. */
8144 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8145 {
8146 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8147 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8148 }
8149 else
8150 {
8151 /* Get the register (or SIB) value. */
8152 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8153 {
8154 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8155 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8156 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8157 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8158 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8159 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8160 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8161 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8162 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8163 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8164 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8165 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8166 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8167 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8168 /* SIB */
8169 case 4:
8170 case 12:
8171 {
8172 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8173
8174 /* Get the index and scale it. */
8175 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8176 {
8177 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8178 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8179 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8180 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8181 case 4: u64EffAddr = 0; /*none */ break;
8182 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8183 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8184 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8185 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8186 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8187 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8188 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8189 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8190 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8191 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8192 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8193 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8194 }
8195 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8196
8197 /* add base */
8198 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8199 {
8200 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8201 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8202 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8203 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8204 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8205 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8206 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8207 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8208 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8209 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8210 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8211 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8212 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8213 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8214 /* complicated encodings */
8215 case 5:
8216 case 13:
8217 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8218 {
8219 if (!pVCpu->iem.s.uRexB)
8220 {
8221 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8222 SET_SS_DEF();
8223 }
8224 else
8225 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8226 }
8227 else
8228 {
8229 uint32_t u32Disp;
8230 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8231 u64EffAddr += (int32_t)u32Disp;
8232 }
8233 break;
8234 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8235 }
8236 break;
8237 }
8238 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8239 }
8240
8241 /* Get and add the displacement. */
8242 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8243 {
8244 case 0:
8245 break;
8246 case 1:
8247 {
8248 int8_t i8Disp;
8249 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8250 u64EffAddr += i8Disp;
8251 break;
8252 }
8253 case 2:
8254 {
8255 uint32_t u32Disp;
8256 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8257 u64EffAddr += (int32_t)u32Disp;
8258 break;
8259 }
8260 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8261 }
8262
8263 }
8264
8265 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8266 *pGCPtrEff = u64EffAddr;
8267 else
8268 {
8269 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8270 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8271 }
8272 }
8273
8274 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8275 return VINF_SUCCESS;
8276}
8277
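/*
 * Worked example, illustrative only: for "mov eax, [ebx+esi*4+0x10]" in
 * 32-bit address mode the ModR/M byte is 0x44 (mod=01, reg=000, rm=100 -> SIB
 * follows), the SIB byte is 0xb3 (scale=2 -> x4, index=esi, base=ebx) and the
 * disp8 is 0x10, so the helper returns ebx + esi*4 + 0x10.  The longjmp and
 * extended variants below perform the same decoding.
 */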
8278
8279#ifdef IEM_WITH_SETJMP
8280/**
8281 * Calculates the effective address of a ModR/M memory operand.
8282 *
8283 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8284 *
8285 * May longjmp on internal error.
8286 *
8287 * @return The effective address.
8288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8289 * @param bRm The ModRM byte.
8290 * @param cbImmAndRspOffset - First byte: The size of any immediate
8291 * following the effective address opcode bytes
8292 * (only for RIP relative addressing).
8293 * - Second byte: RSP displacement (for POP [ESP]).
8294 */
8295RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8296{
8297 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8298# define SET_SS_DEF() \
8299 do \
8300 { \
8301 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8302 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8303 } while (0)
8304
8305 if (!IEM_IS_64BIT_CODE(pVCpu))
8306 {
8307/** @todo Check the effective address size crap! */
8308 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8309 {
8310 uint16_t u16EffAddr;
8311
8312 /* Handle the disp16 form with no registers first. */
8313 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8314 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8315 else
8316 {
8317 /* Get the displacement. */
8318 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8319 {
8320 case 0: u16EffAddr = 0; break;
8321 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8322 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8323 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8324 }
8325
8326 /* Add the base and index registers to the disp. */
8327 switch (bRm & X86_MODRM_RM_MASK)
8328 {
8329 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8330 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8331 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8332 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8333 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8334 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8335 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8336 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8337 }
8338 }
8339
8340 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8341 return u16EffAddr;
8342 }
8343
8344 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8345 uint32_t u32EffAddr;
8346
8347 /* Handle the disp32 form with no registers first. */
8348 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8349 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8350 else
8351 {
8352 /* Get the register (or SIB) value. */
8353 switch ((bRm & X86_MODRM_RM_MASK))
8354 {
8355 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8356 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8357 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8358 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8359 case 4: /* SIB */
8360 {
8361 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8362
8363 /* Get the index and scale it. */
8364 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8365 {
8366 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8367 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8368 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8369 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8370 case 4: u32EffAddr = 0; /*none */ break;
8371 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8372 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8373 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8374 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8375 }
8376 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8377
8378 /* add base */
8379 switch (bSib & X86_SIB_BASE_MASK)
8380 {
8381 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8382 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8383 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8384 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8385 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8386 case 5:
8387 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8388 {
8389 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8390 SET_SS_DEF();
8391 }
8392 else
8393 {
8394 uint32_t u32Disp;
8395 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8396 u32EffAddr += u32Disp;
8397 }
8398 break;
8399 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8400 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8401 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8402 }
8403 break;
8404 }
8405 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8406 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8407 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8408 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8409 }
8410
8411 /* Get and add the displacement. */
8412 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8413 {
8414 case 0:
8415 break;
8416 case 1:
8417 {
8418 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8419 u32EffAddr += i8Disp;
8420 break;
8421 }
8422 case 2:
8423 {
8424 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8425 u32EffAddr += u32Disp;
8426 break;
8427 }
8428 default:
8429 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8430 }
8431 }
8432
8433 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8434 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8435 return u32EffAddr;
8436 }
8437
8438 uint64_t u64EffAddr;
8439
8440 /* Handle the rip+disp32 form with no registers first. */
8441 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8442 {
8443 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8444 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8445 }
8446 else
8447 {
8448 /* Get the register (or SIB) value. */
8449 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8450 {
8451 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8452 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8453 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8454 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8455 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8456 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8457 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8458 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8459 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8460 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8461 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8462 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8463 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8464 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8465 /* SIB */
8466 case 4:
8467 case 12:
8468 {
8469 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8470
8471 /* Get the index and scale it. */
8472 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8473 {
8474 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8475 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8476 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8477 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8478 case 4: u64EffAddr = 0; /*none */ break;
8479 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8480 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8481 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8482 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8483 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8484 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8485 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8486 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8487 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8488 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8489 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8490 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8491 }
8492 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8493
8494 /* add base */
8495 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8496 {
8497 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8498 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8499 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8500 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8501 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8502 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8503 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8504 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8505 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8506 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8507 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8508 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8509 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8510 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8511 /* complicated encodings */
8512 case 5:
8513 case 13:
8514 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8515 {
8516 if (!pVCpu->iem.s.uRexB)
8517 {
8518 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8519 SET_SS_DEF();
8520 }
8521 else
8522 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8523 }
8524 else
8525 {
8526 uint32_t u32Disp;
8527 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8528 u64EffAddr += (int32_t)u32Disp;
8529 }
8530 break;
8531 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8532 }
8533 break;
8534 }
8535 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8536 }
8537
8538 /* Get and add the displacement. */
8539 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8540 {
8541 case 0:
8542 break;
8543 case 1:
8544 {
8545 int8_t i8Disp;
8546 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8547 u64EffAddr += i8Disp;
8548 break;
8549 }
8550 case 2:
8551 {
8552 uint32_t u32Disp;
8553 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8554 u64EffAddr += (int32_t)u32Disp;
8555 break;
8556 }
8557 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8558 }
8559
8560 }
8561
8562 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8563 {
8564 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8565 return u64EffAddr;
8566 }
8567 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8568 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8569 return u64EffAddr & UINT32_MAX;
8570}
8571#endif /* IEM_WITH_SETJMP */
8572
8573
8574/**
8575 * Calculates the effective address of a ModR/M memory operand, extended version
8576 * for use in the recompilers.
8577 *
8578 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8579 *
8580 * @return Strict VBox status code.
8581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8582 * @param bRm The ModRM byte.
8583 * @param cbImmAndRspOffset - First byte: The size of any immediate
8584 * following the effective address opcode bytes
8585 * (only for RIP relative addressing).
8586 * - Second byte: RSP displacement (for POP [ESP]).
8587 * @param pGCPtrEff Where to return the effective address.
8588 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8589 * SIB byte (bits 39:32).
8590 */
8591VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8592{
8593 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8594# define SET_SS_DEF() \
8595 do \
8596 { \
8597 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8598 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8599 } while (0)
8600
8601 uint64_t uInfo;
8602 if (!IEM_IS_64BIT_CODE(pVCpu))
8603 {
8604/** @todo Check the effective address size crap! */
8605 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8606 {
8607 uint16_t u16EffAddr;
8608
8609 /* Handle the disp16 form with no registers first. */
8610 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8611 {
8612 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8613 uInfo = u16EffAddr;
8614 }
8615 else
8616 {
8617 /* Get the displacement. */
8618 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8619 {
8620 case 0: u16EffAddr = 0; break;
8621 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8622 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8623 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8624 }
8625 uInfo = u16EffAddr;
8626
8627 /* Add the base and index registers to the disp. */
8628 switch (bRm & X86_MODRM_RM_MASK)
8629 {
8630 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8631 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8632 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8633 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8634 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8635 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8636 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8637 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8638 }
8639 }
8640
8641 *pGCPtrEff = u16EffAddr;
8642 }
8643 else
8644 {
8645 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8646 uint32_t u32EffAddr;
8647
8648 /* Handle the disp32 form with no registers first. */
8649 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8650 {
8651 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8652 uInfo = u32EffAddr;
8653 }
8654 else
8655 {
8656 /* Get the register (or SIB) value. */
8657 uInfo = 0;
8658 switch ((bRm & X86_MODRM_RM_MASK))
8659 {
8660 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8661 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8662 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8663 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8664 case 4: /* SIB */
8665 {
8666 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8667 uInfo = (uint64_t)bSib << 32;
8668
8669 /* Get the index and scale it. */
8670 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8671 {
8672 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8673 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8674 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8675 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8676 case 4: u32EffAddr = 0; /*none */ break;
8677 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8678 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8679 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8680 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8681 }
8682 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8683
8684 /* add base */
8685 switch (bSib & X86_SIB_BASE_MASK)
8686 {
8687 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8688 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8689 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8690 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8691 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8692 case 5:
8693 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8694 {
8695 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8696 SET_SS_DEF();
8697 }
8698 else
8699 {
8700 uint32_t u32Disp;
8701 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8702 u32EffAddr += u32Disp;
8703 uInfo |= u32Disp;
8704 }
8705 break;
8706 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8707 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8708 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8709 }
8710 break;
8711 }
8712 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8713 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8714 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8715 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8716 }
8717
8718 /* Get and add the displacement. */
8719 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8720 {
8721 case 0:
8722 break;
8723 case 1:
8724 {
8725 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8726 u32EffAddr += i8Disp;
8727 uInfo |= (uint32_t)(int32_t)i8Disp;
8728 break;
8729 }
8730 case 2:
8731 {
8732 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8733 u32EffAddr += u32Disp;
8734 uInfo |= (uint32_t)u32Disp;
8735 break;
8736 }
8737 default:
8738 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8739 }
8740
8741 }
8742 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8743 *pGCPtrEff = u32EffAddr;
8744 }
8745 }
8746 else
8747 {
8748 uint64_t u64EffAddr;
8749
8750 /* Handle the rip+disp32 form with no registers first. */
8751 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8752 {
8753 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8754 uInfo = (uint32_t)u64EffAddr;
8755 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8756 }
8757 else
8758 {
8759 /* Get the register (or SIB) value. */
8760 uInfo = 0;
8761 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8762 {
8763 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8764 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8765 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8766 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8767 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8768 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8769 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8770 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8771 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8772 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8773 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8774 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8775 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8776 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8777 /* SIB */
8778 case 4:
8779 case 12:
8780 {
8781 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8782 uInfo = (uint64_t)bSib << 32;
8783
8784 /* Get the index and scale it. */
8785 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8786 {
8787 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8788 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8789 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8790 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8791 case 4: u64EffAddr = 0; /*none */ break;
8792 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8793 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8794 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8795 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8796 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8797 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8798 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8799 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8800 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8801 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8802 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8803 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8804 }
8805 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8806
8807 /* add base */
8808 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8809 {
8810 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8811 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8812 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8813 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8814 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8815 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8816 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8817 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8818 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8819 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8820 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8821 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8822 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8823 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8824 /* complicated encodings */
8825 case 5:
8826 case 13:
8827 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8828 {
8829 if (!pVCpu->iem.s.uRexB)
8830 {
8831 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8832 SET_SS_DEF();
8833 }
8834 else
8835 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8836 }
8837 else
8838 {
8839 uint32_t u32Disp;
8840 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8841 u64EffAddr += (int32_t)u32Disp;
8842 uInfo |= u32Disp;
8843 }
8844 break;
8845 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8846 }
8847 break;
8848 }
8849 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8850 }
8851
8852 /* Get and add the displacement. */
8853 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8854 {
8855 case 0:
8856 break;
8857 case 1:
8858 {
8859 int8_t i8Disp;
8860 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8861 u64EffAddr += i8Disp;
8862 uInfo |= (uint32_t)(int32_t)i8Disp;
8863 break;
8864 }
8865 case 2:
8866 {
8867 uint32_t u32Disp;
8868 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8869 u64EffAddr += (int32_t)u32Disp;
8870 uInfo |= u32Disp;
8871 break;
8872 }
8873 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8874 }
8875
8876 }
8877
8878 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8879 *pGCPtrEff = u64EffAddr;
8880 else
8881 {
8882 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8883 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8884 }
8885 }
8886 *puInfo = uInfo;
8887
8888 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
8889 return VINF_SUCCESS;
8890}
8891
8892/** @} */
8893
8894
8895#ifdef LOG_ENABLED
8896/**
8897 * Logs the current instruction.
8898 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8899 * @param fSameCtx Set if we have the same context information as the VMM,
8900 * clear if we may have already executed an instruction in
8901 * our debug context. When clear, we assume IEMCPU holds
8902 * valid CPU mode info.
8903 *
8904 * The @a fSameCtx parameter is now misleading and obsolete.
8905 * @param pszFunction The IEM function doing the execution.
8906 */
8907static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
8908{
8909# ifdef IN_RING3
8910 if (LogIs2Enabled())
8911 {
8912 char szInstr[256];
8913 uint32_t cbInstr = 0;
8914 if (fSameCtx)
8915 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
8916 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8917 szInstr, sizeof(szInstr), &cbInstr);
8918 else
8919 {
8920 uint32_t fFlags = 0;
8921 switch (IEM_GET_CPU_MODE(pVCpu))
8922 {
8923 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
8924 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
8925 case IEMMODE_16BIT:
8926 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
8927 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
8928 else
8929 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
8930 break;
8931 }
8932 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
8933 szInstr, sizeof(szInstr), &cbInstr);
8934 }
8935
8936 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8937 Log2(("**** %s fExec=%x\n"
8938 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8939 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
8940 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8941 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8942 " %s\n"
8943 , pszFunction, pVCpu->iem.s.fExec,
8944 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
8945 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
8946 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
8947 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
8948 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
8949 szInstr));
8950
8951 /* This stuff sucks atm. as it fills the log with MSRs. */
8952 //if (LogIs3Enabled())
8953 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
8954 }
8955 else
8956# endif
8957 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
8958 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
8959 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
8960}
8961#endif /* LOG_ENABLED */
8962
8963
8964#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8965/**
8966 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
8967 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
8968 *
8969 * @returns Modified rcStrict.
8970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8971 * @param rcStrict The instruction execution status.
8972 */
8973static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
8974{
8975 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
8976 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
8977 {
8978 /* VMX preemption timer takes priority over NMI-window exits. */
8979 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
8980 {
8981 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
8982 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
8983 }
8984 /*
8985 * Check remaining intercepts.
8986 *
8987 * NMI-window and Interrupt-window VM-exits.
8988 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
8989 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
8990 *
8991 * See Intel spec. 26.7.6 "NMI-Window Exiting".
8992 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
8993 */
8994 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
8995 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
8996 && !TRPMHasTrap(pVCpu))
8997 {
8998 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
8999 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9000 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9001 {
9002 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9003 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9004 }
9005 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9006 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9007 {
9008 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9009 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9010 }
9011 }
9012 }
9013 /* TPR-below threshold/APIC write has the highest priority. */
9014 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9015 {
9016 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9017 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9018 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9019 }
9020 /* MTF takes priority over VMX-preemption timer. */
9021 else
9022 {
9023 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9024 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9025 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9026 }
9027 return rcStrict;
9028}
9029#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9030
9031
9032/**
9033 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9034 * IEMExecOneWithPrefetchedByPC.
9035 *
9036 * Similar code is found in IEMExecLots.
9037 *
9038 * @return Strict VBox status code.
9039 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9040 * @param fExecuteInhibit If set, execute the instruction following CLI,
9041 * POP SS and MOV SS,GR.
9042 * @param pszFunction The calling function name.
9043 */
9044DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9045{
9046 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9047 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9048 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9049 RT_NOREF_PV(pszFunction);
9050
9051#ifdef IEM_WITH_SETJMP
9052 VBOXSTRICTRC rcStrict;
9053 IEM_TRY_SETJMP(pVCpu, rcStrict)
9054 {
9055 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9056 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9057 }
9058 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9059 {
9060 pVCpu->iem.s.cLongJumps++;
9061 }
9062 IEM_CATCH_LONGJMP_END(pVCpu);
9063#else
9064 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9065 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9066#endif
9067 if (rcStrict == VINF_SUCCESS)
9068 pVCpu->iem.s.cInstructions++;
9069 if (pVCpu->iem.s.cActiveMappings > 0)
9070 {
9071 Assert(rcStrict != VINF_SUCCESS);
9072 iemMemRollback(pVCpu);
9073 }
9074 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9075 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9076 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9077
9078//#ifdef DEBUG
9079// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9080//#endif
9081
9082#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9083 /*
9084 * Perform any VMX nested-guest instruction boundary actions.
9085 *
9086 * If any of these causes a VM-exit, we must skip executing the next
9087 * instruction (would run into stale page tables). A VM-exit makes sure
9088 * there is no interrupt-inhibition, so that should ensure we don't go on
9089 * to try executing the next instruction. Clearing fExecuteInhibit is
9090 * problematic because of the setjmp/longjmp clobbering above.
9091 */
9092 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9093 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9094 || rcStrict != VINF_SUCCESS)
9095 { /* likely */ }
9096 else
9097 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9098#endif
9099
9100 /* Execute the next instruction as well if a cli, pop ss or
9101 mov ss, Gr has just completed successfully. */
9102 if ( fExecuteInhibit
9103 && rcStrict == VINF_SUCCESS
9104 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9105 {
9106 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9107 if (rcStrict == VINF_SUCCESS)
9108 {
9109#ifdef LOG_ENABLED
9110 iemLogCurInstr(pVCpu, false, pszFunction);
9111#endif
9112#ifdef IEM_WITH_SETJMP
9113 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9114 {
9115 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9116 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9117 }
9118 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9119 {
9120 pVCpu->iem.s.cLongJumps++;
9121 }
9122 IEM_CATCH_LONGJMP_END(pVCpu);
9123#else
9124 IEM_OPCODE_GET_FIRST_U8(&b);
9125 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9126#endif
9127 if (rcStrict == VINF_SUCCESS)
9128 {
9129 pVCpu->iem.s.cInstructions++;
9130#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9131 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9132 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9133 { /* likely */ }
9134 else
9135 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9136#endif
9137 }
9138 if (pVCpu->iem.s.cActiveMappings > 0)
9139 {
9140 Assert(rcStrict != VINF_SUCCESS);
9141 iemMemRollback(pVCpu);
9142 }
9143 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9144 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9145 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9146 }
9147 else if (pVCpu->iem.s.cActiveMappings > 0)
9148 iemMemRollback(pVCpu);
9149 /** @todo drop this after we bake this change into RIP advancing. */
9150 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9151 }
9152
9153 /*
9154 * Return value fiddling, statistics and sanity assertions.
9155 */
9156 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9157
9158 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9159 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9160 return rcStrict;
9161}
9162
9163
9164/**
9165 * Execute one instruction.
9166 *
9167 * @return Strict VBox status code.
9168 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9169 */
9170VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9171{
9172 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9173#ifdef LOG_ENABLED
9174 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9175#endif
9176
9177 /*
9178 * Do the decoding and emulation.
9179 */
9180 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9181 if (rcStrict == VINF_SUCCESS)
9182 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9183 else if (pVCpu->iem.s.cActiveMappings > 0)
9184 iemMemRollback(pVCpu);
9185
9186 if (rcStrict != VINF_SUCCESS)
9187 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9188 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9189 return rcStrict;
9190}
9191
9192
9193VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9194{
9195 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9196 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9197 if (rcStrict == VINF_SUCCESS)
9198 {
9199 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9200 if (pcbWritten)
9201 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9202 }
9203 else if (pVCpu->iem.s.cActiveMappings > 0)
9204 iemMemRollback(pVCpu);
9205
9206 return rcStrict;
9207}
9208
9209
9210VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9211 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9212{
9213 VBOXSTRICTRC rcStrict;
9214 if ( cbOpcodeBytes
9215 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9216 {
9217 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9218#ifdef IEM_WITH_CODE_TLB
9219 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9220 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9221 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9222 pVCpu->iem.s.offCurInstrStart = 0;
9223 pVCpu->iem.s.offInstrNextByte = 0;
9224 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9225#else
9226 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9227 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9228#endif
9229 rcStrict = VINF_SUCCESS;
9230 }
9231 else
9232 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9233 if (rcStrict == VINF_SUCCESS)
9234 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9235 else if (pVCpu->iem.s.cActiveMappings > 0)
9236 iemMemRollback(pVCpu);
9237
9238 return rcStrict;
9239}
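
/*
 * Illustrative sketch only: how a caller that already holds the opcode bytes
 * for the instruction at the current RIP might use IEMExecOneWithPrefetchedByPC
 * to spare IEM the guest-memory opcode fetch.  The abBytes buffer is made up
 * here; in practice the bytes would come from exit information supplied by the
 * hardware-assisted execution engine.
 *
 * @code
 *      uint8_t const abBytes[] = { 0x0f, 0xa2 };   // e.g. a CPUID instruction
 *      VBOXSTRICTRC  rcStrict  = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
 *                                                             abBytes, sizeof(abBytes));
 *      // rcStrict is then handled exactly like an IEMExecOne status.
 * @endcode
 */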
9240
9241
9242VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9243{
9244 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9245 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9246 if (rcStrict == VINF_SUCCESS)
9247 {
9248 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9249 if (pcbWritten)
9250 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9251 }
9252 else if (pVCpu->iem.s.cActiveMappings > 0)
9253 iemMemRollback(pVCpu);
9254
9255 return rcStrict;
9256}
9257
9258
9259VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9260 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9261{
9262 VBOXSTRICTRC rcStrict;
9263 if ( cbOpcodeBytes
9264 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9265 {
9266 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9267#ifdef IEM_WITH_CODE_TLB
9268 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9269 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9270 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9271 pVCpu->iem.s.offCurInstrStart = 0;
9272 pVCpu->iem.s.offInstrNextByte = 0;
9273 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9274#else
9275 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9276 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9277#endif
9278 rcStrict = VINF_SUCCESS;
9279 }
9280 else
9281 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9282 if (rcStrict == VINF_SUCCESS)
9283 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9284 else if (pVCpu->iem.s.cActiveMappings > 0)
9285 iemMemRollback(pVCpu);
9286
9287 return rcStrict;
9288}
9289
9290
9291/**
9292 * For handling split cacheline lock operations when the host has split-lock
9293 * detection enabled.
9294 *
9295 * This will cause the interpreter to disregard the lock prefix and implicit
9296 * locking (xchg).
9297 *
9298 * @returns Strict VBox status code.
9299 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9300 */
9301VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9302{
9303 /*
9304 * Do the decoding and emulation.
9305 */
9306 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9307 if (rcStrict == VINF_SUCCESS)
9308 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9309 else if (pVCpu->iem.s.cActiveMappings > 0)
9310 iemMemRollback(pVCpu);
9311
9312 if (rcStrict != VINF_SUCCESS)
9313 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9314 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9315 return rcStrict;
9316}
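
/*
 * Illustrative sketch only: the retry pattern assumed for split-lock handling.
 * When the host's split-lock detection trips on a guest locked access (how
 * that is detected is outside this API and merely assumed here), the caller
 * re-executes the instruction with the lock prefix and implicit locking
 * disregarded:
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
 *      // The strict status is handled like any other IEMExecOne result.
 * @endcode
 */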
9317
9318
9319/**
9320 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9321 * inject a pending TRPM trap.
9322 */
9323VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9324{
9325 Assert(TRPMHasTrap(pVCpu));
9326
9327 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9328 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9329 {
9330 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9331#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9332 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9333 if (fIntrEnabled)
9334 {
9335 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9336 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9337 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9338 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9339 else
9340 {
9341 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9342 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9343 }
9344 }
9345#else
9346 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9347#endif
9348 if (fIntrEnabled)
9349 {
9350 uint8_t u8TrapNo;
9351 TRPMEVENT enmType;
9352 uint32_t uErrCode;
9353 RTGCPTR uCr2;
9354 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9355 AssertRC(rc2);
9356 Assert(enmType == TRPM_HARDWARE_INT);
9357 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9358
9359 TRPMResetTrap(pVCpu);
9360
9361#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9362 /* Injecting an event may cause a VM-exit. */
9363 if ( rcStrict != VINF_SUCCESS
9364 && rcStrict != VINF_IEM_RAISED_XCPT)
9365 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9366#else
9367 NOREF(rcStrict);
9368#endif
9369 }
9370 }
9371
9372 return VINF_SUCCESS;
9373}
9374
9375
9376VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9377{
9378 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9379 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9380 Assert(cMaxInstructions > 0);
9381
9382 /*
9383 * See if there is an interrupt pending in TRPM, inject it if we can.
9384 */
9385 /** @todo What if we are injecting an exception and not an interrupt? Is that
9386 * possible here? For now we assert it is indeed only an interrupt. */
9387 if (!TRPMHasTrap(pVCpu))
9388 { /* likely */ }
9389 else
9390 {
9391 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9392 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9393 { /*likely */ }
9394 else
9395 return rcStrict;
9396 }
9397
9398 /*
9399 * Initial decoder init w/ prefetch, then setup setjmp.
9400 */
9401 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9402 if (rcStrict == VINF_SUCCESS)
9403 {
9404#ifdef IEM_WITH_SETJMP
9405 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9406 IEM_TRY_SETJMP(pVCpu, rcStrict)
9407#endif
9408 {
9409 /*
9410 * The run loop. We limit ourselves to the caller-specified maximum instruction count.
9411 */
9412 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9413 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9414 for (;;)
9415 {
9416 /*
9417 * Log the state.
9418 */
9419#ifdef LOG_ENABLED
9420 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9421#endif
9422
9423 /*
9424 * Do the decoding and emulation.
9425 */
9426 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9427 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9428#ifdef VBOX_STRICT
9429 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9430#endif
9431 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9432 {
9433 Assert(pVCpu->iem.s.cActiveMappings == 0);
9434 pVCpu->iem.s.cInstructions++;
9435
9436#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9437 /* Perform any VMX nested-guest instruction boundary actions. */
9438 uint64_t fCpu = pVCpu->fLocalForcedActions;
9439 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9440 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9441 { /* likely */ }
9442 else
9443 {
9444 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9445 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9446 fCpu = pVCpu->fLocalForcedActions;
9447 else
9448 {
9449 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9450 break;
9451 }
9452 }
9453#endif
9454 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9455 {
9456#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9457 uint64_t fCpu = pVCpu->fLocalForcedActions;
9458#endif
9459 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9460 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9461 | VMCPU_FF_TLB_FLUSH
9462 | VMCPU_FF_UNHALT );
9463
9464 if (RT_LIKELY( ( !fCpu
9465 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9466 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9467 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9468 {
9469 if (--cMaxInstructionsGccStupidity > 0)
9470 {
9471 /* Poll timers every now and then according to the caller's specs. */
9472 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9473 || !TMTimerPollBool(pVM, pVCpu))
9474 {
9475 Assert(pVCpu->iem.s.cActiveMappings == 0);
9476 iemReInitDecoder(pVCpu);
9477 continue;
9478 }
9479 }
9480 }
9481 }
9482 Assert(pVCpu->iem.s.cActiveMappings == 0);
9483 }
9484 else if (pVCpu->iem.s.cActiveMappings > 0)
9485 iemMemRollback(pVCpu);
9486 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9487 break;
9488 }
9489 }
9490#ifdef IEM_WITH_SETJMP
9491 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9492 {
9493 if (pVCpu->iem.s.cActiveMappings > 0)
9494 iemMemRollback(pVCpu);
9495# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9496 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9497# endif
9498 pVCpu->iem.s.cLongJumps++;
9499 }
9500 IEM_CATCH_LONGJMP_END(pVCpu);
9501#endif
9502
9503 /*
9504 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9505 */
9506 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9507 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9508 }
9509 else
9510 {
9511 if (pVCpu->iem.s.cActiveMappings > 0)
9512 iemMemRollback(pVCpu);
9513
9514#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9515 /*
9516 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9517 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9518 */
9519 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9520#endif
9521 }
9522
9523 /*
9524 * Maybe re-enter raw-mode and log.
9525 */
9526 if (rcStrict != VINF_SUCCESS)
9527 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9528 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9529 if (pcInstructions)
9530 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9531 return rcStrict;
9532}
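
/*
 * Illustrative sketch only: a hypothetical IEMExecLots call.  Note that
 * cPollRate is used as a mask and therefore must be a power of two minus one
 * (see the assertion at the top of the function); with cPollRate=2047 the
 * timers are polled via TMTimerPollBool roughly once every 2048 instructions.
 *
 * @code
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 , 2047, &cInstructions);
 *      LogFlow(("Executed %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */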
9533
9534
9535/**
9536 * Interface used by EMExecuteExec, does exit statistics and limits.
9537 *
9538 * @returns Strict VBox status code.
9539 * @param pVCpu The cross context virtual CPU structure.
9540 * @param fWillExit To be defined.
9541 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9542 * @param cMaxInstructions Maximum number of instructions to execute.
9543 * @param cMaxInstructionsWithoutExits
9544 * The max number of instructions without exits.
9545 * @param pStats Where to return statistics.
9546 */
9547VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9548 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9549{
9550 NOREF(fWillExit); /** @todo define flexible exit crits */
9551
9552 /*
9553 * Initialize return stats.
9554 */
9555 pStats->cInstructions = 0;
9556 pStats->cExits = 0;
9557 pStats->cMaxExitDistance = 0;
9558 pStats->cReserved = 0;
9559
9560 /*
9561 * Initial decoder init w/ prefetch, then setup setjmp.
9562 */
9563 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9564 if (rcStrict == VINF_SUCCESS)
9565 {
9566#ifdef IEM_WITH_SETJMP
9567 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9568 IEM_TRY_SETJMP(pVCpu, rcStrict)
9569#endif
9570 {
9571#ifdef IN_RING0
9572 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9573#endif
9574 uint32_t cInstructionSinceLastExit = 0;
9575
9576 /*
9577 * The run loop. We limit ourselves to the caller-specified maximum instruction count.
9578 */
9579 PVM pVM = pVCpu->CTX_SUFF(pVM);
9580 for (;;)
9581 {
9582 /*
9583 * Log the state.
9584 */
9585#ifdef LOG_ENABLED
9586 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9587#endif
9588
9589 /*
9590 * Do the decoding and emulation.
9591 */
9592 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9593
9594 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9595 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9596
9597 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9598 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9599 {
9600 pStats->cExits += 1;
9601 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9602 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9603 cInstructionSinceLastExit = 0;
9604 }
9605
9606 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9607 {
9608 Assert(pVCpu->iem.s.cActiveMappings == 0);
9609 pVCpu->iem.s.cInstructions++;
9610 pStats->cInstructions++;
9611 cInstructionSinceLastExit++;
9612
9613#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9614 /* Perform any VMX nested-guest instruction boundary actions. */
9615 uint64_t fCpu = pVCpu->fLocalForcedActions;
9616 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9617 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9618 { /* likely */ }
9619 else
9620 {
9621 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9622 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9623 fCpu = pVCpu->fLocalForcedActions;
9624 else
9625 {
9626 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9627 break;
9628 }
9629 }
9630#endif
9631 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9632 {
9633#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9634 uint64_t fCpu = pVCpu->fLocalForcedActions;
9635#endif
9636 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9637 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9638 | VMCPU_FF_TLB_FLUSH
9639 | VMCPU_FF_UNHALT );
9640 if (RT_LIKELY( ( ( !fCpu
9641 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9642 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9643 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9644 || pStats->cInstructions < cMinInstructions))
9645 {
9646 if (pStats->cInstructions < cMaxInstructions)
9647 {
9648 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9649 {
9650#ifdef IN_RING0
9651 if ( !fCheckPreemptionPending
9652 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9653#endif
9654 {
9655 Assert(pVCpu->iem.s.cActiveMappings == 0);
9656 iemReInitDecoder(pVCpu);
9657 continue;
9658 }
9659#ifdef IN_RING0
9660 rcStrict = VINF_EM_RAW_INTERRUPT;
9661 break;
9662#endif
9663 }
9664 }
9665 }
9666 Assert(!(fCpu & VMCPU_FF_IEM));
9667 }
9668 Assert(pVCpu->iem.s.cActiveMappings == 0);
9669 }
9670 else if (pVCpu->iem.s.cActiveMappings > 0)
9671 iemMemRollback(pVCpu);
9672 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9673 break;
9674 }
9675 }
9676#ifdef IEM_WITH_SETJMP
9677 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9678 {
9679 if (pVCpu->iem.s.cActiveMappings > 0)
9680 iemMemRollback(pVCpu);
9681 pVCpu->iem.s.cLongJumps++;
9682 }
9683 IEM_CATCH_LONGJMP_END(pVCpu);
9684#endif
9685
9686 /*
9687 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9688 */
9689 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9690 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9691 }
9692 else
9693 {
9694 if (pVCpu->iem.s.cActiveMappings > 0)
9695 iemMemRollback(pVCpu);
9696
9697#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9698 /*
9699 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9700 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9701 */
9702 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9703#endif
9704 }
9705
9706 /*
9707 * Maybe re-enter raw-mode and log.
9708 */
9709 if (rcStrict != VINF_SUCCESS)
9710 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9711 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9712 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9713 return rcStrict;
9714}
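
/*
 * Illustrative sketch only: a hypothetical IEMExecForExits call.  The limits
 * below are made up, and fWillExit is currently unused by the implementation.
 *
 * @code
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 8, 4096, 512, &Stats);
 *      // -> fWillExit=0, cMinInstructions=8, cMaxInstructions=4096, cMaxInstructionsWithoutExits=512
 *      LogFlow(("ins=%u exits=%u maxdist=%u rcStrict=%Rrc\n", Stats.cInstructions, Stats.cExits,
 *               Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */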
9715
9716
9717/**
9718 * Injects a trap, fault, abort, software interrupt or external interrupt.
9719 *
9720 * The parameter list matches TRPMQueryTrapAll pretty closely.
9721 *
9722 * @returns Strict VBox status code.
9723 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9724 * @param u8TrapNo The trap number.
9725 * @param enmType What type is it (trap/fault/abort), software
9726 * interrupt or hardware interrupt.
9727 * @param uErrCode The error code if applicable.
9728 * @param uCr2 The CR2 value if applicable.
9729 * @param cbInstr The instruction length (only relevant for
9730 * software interrupts).
9731 */
9732VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9733 uint8_t cbInstr)
9734{
9735 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9736#ifdef DBGFTRACE_ENABLED
9737 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9738 u8TrapNo, enmType, uErrCode, uCr2);
9739#endif
9740
9741 uint32_t fFlags;
9742 switch (enmType)
9743 {
9744 case TRPM_HARDWARE_INT:
9745 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9746 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9747 uErrCode = uCr2 = 0;
9748 break;
9749
9750 case TRPM_SOFTWARE_INT:
9751 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9752 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9753 uErrCode = uCr2 = 0;
9754 break;
9755
9756 case TRPM_TRAP:
9757 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9758 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9759 if (u8TrapNo == X86_XCPT_PF)
9760 fFlags |= IEM_XCPT_FLAGS_CR2;
9761 switch (u8TrapNo)
9762 {
9763 case X86_XCPT_DF:
9764 case X86_XCPT_TS:
9765 case X86_XCPT_NP:
9766 case X86_XCPT_SS:
9767 case X86_XCPT_PF:
9768 case X86_XCPT_AC:
9769 case X86_XCPT_GP:
9770 fFlags |= IEM_XCPT_FLAGS_ERR;
9771 break;
9772 }
9773 break;
9774
9775 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9776 }
9777
9778 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9779
9780 if (pVCpu->iem.s.cActiveMappings > 0)
9781 iemMemRollback(pVCpu);
9782
9783 return rcStrict;
9784}
9785
9786
9787/**
9788 * Injects the active TRPM event.
9789 *
9790 * @returns Strict VBox status code.
9791 * @param pVCpu The cross context virtual CPU structure.
9792 */
9793VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9794{
9795#ifndef IEM_IMPLEMENTS_TASKSWITCH
9796 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9797#else
9798 uint8_t u8TrapNo;
9799 TRPMEVENT enmType;
9800 uint32_t uErrCode;
9801 RTGCUINTPTR uCr2;
9802 uint8_t cbInstr;
9803 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9804 if (RT_FAILURE(rc))
9805 return rc;
9806
9807 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9808 * ICEBP \#DB injection as a special case. */
9809 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9810#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9811 if (rcStrict == VINF_SVM_VMEXIT)
9812 rcStrict = VINF_SUCCESS;
9813#endif
9814#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9815 if (rcStrict == VINF_VMX_VMEXIT)
9816 rcStrict = VINF_SUCCESS;
9817#endif
9818 /** @todo Are there any other codes that imply the event was successfully
9819 * delivered to the guest? See @bugref{6607}. */
9820 if ( rcStrict == VINF_SUCCESS
9821 || rcStrict == VINF_IEM_RAISED_XCPT)
9822 TRPMResetTrap(pVCpu);
9823
9824 return rcStrict;
9825#endif
9826}
9827
9828
9829VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9830{
9831 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9832 return VERR_NOT_IMPLEMENTED;
9833}
9834
9835
9836VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9837{
9838 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9839 return VERR_NOT_IMPLEMENTED;
9840}
9841
9842
9843/**
9844 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9845 *
9846 * This API ASSUMES that the caller has already verified that the guest code is
9847 * allowed to access the I/O port. (The I/O port is in the DX register in the
9848 * guest state.)
9849 *
9850 * @returns Strict VBox status code.
9851 * @param pVCpu The cross context virtual CPU structure.
9852 * @param cbValue The size of the I/O port access (1, 2, or 4).
9853 * @param enmAddrMode The addressing mode.
9854 * @param fRepPrefix Indicates whether a repeat prefix is used
9855 * (doesn't matter which for this instruction).
9856 * @param cbInstr The instruction length in bytes.
9857 * @param iEffSeg The effective segment register.
9858 * @param fIoChecked Whether the access to the I/O port has been
9859 * checked or not. It's typically checked in the
9860 * HM scenario.
9861 */
9862VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9863 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
9864{
9865 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
9866 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9867
9868 /*
9869 * State init.
9870 */
9871 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9872
9873 /*
9874 * Switch orgy for getting to the right handler.
9875 */
9876 VBOXSTRICTRC rcStrict;
9877 if (fRepPrefix)
9878 {
9879 switch (enmAddrMode)
9880 {
9881 case IEMMODE_16BIT:
9882 switch (cbValue)
9883 {
9884 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9885 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9886 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9887 default:
9888 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9889 }
9890 break;
9891
9892 case IEMMODE_32BIT:
9893 switch (cbValue)
9894 {
9895 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9896 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9897 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9898 default:
9899 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9900 }
9901 break;
9902
9903 case IEMMODE_64BIT:
9904 switch (cbValue)
9905 {
9906 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9907 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9908 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9909 default:
9910 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9911 }
9912 break;
9913
9914 default:
9915 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9916 }
9917 }
9918 else
9919 {
9920 switch (enmAddrMode)
9921 {
9922 case IEMMODE_16BIT:
9923 switch (cbValue)
9924 {
9925 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9926 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9927 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9928 default:
9929 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9930 }
9931 break;
9932
9933 case IEMMODE_32BIT:
9934 switch (cbValue)
9935 {
9936 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9937 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9938 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9939 default:
9940 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9941 }
9942 break;
9943
9944 case IEMMODE_64BIT:
9945 switch (cbValue)
9946 {
9947 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9948 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9949 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9950 default:
9951 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9952 }
9953 break;
9954
9955 default:
9956 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9957 }
9958 }
9959
9960 if (pVCpu->iem.s.cActiveMappings)
9961 iemMemRollback(pVCpu);
9962
9963 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9964}
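
/*
 * Illustrative sketch only: a hypothetical call for a REP OUTSB (opcode bytes
 * F3 6E, i.e. cbInstr=2) in a 32-bit guest, as an HM I/O-exit handler might
 * issue it after validating the I/O-port access (hence fIoChecked=true).  The
 * segment and address mode would normally come from the exit/decode info and
 * are simply assumed here.
 *
 * @code
 *      // cbValue=1, fRepPrefix=true, cbInstr=2, iEffSeg=DS, fIoChecked=true.
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT,
 *                                                   true, 2, X86_SREG_DS, true);
 * @endcode
 */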
9965
9966
9967/**
9968 * Interface for HM and EM for executing string I/O IN (read) instructions.
9969 *
9970 * This API ASSUMES that the caller has already verified that the guest code is
9971 * allowed to access the I/O port. (The I/O port is in the DX register in the
9972 * guest state.)
9973 *
9974 * @returns Strict VBox status code.
9975 * @param pVCpu The cross context virtual CPU structure.
9976 * @param cbValue The size of the I/O port access (1, 2, or 4).
9977 * @param enmAddrMode The addressing mode.
9978 * @param fRepPrefix Indicates whether a repeat prefix is used
9979 * (doesn't matter which for this instruction).
9980 * @param cbInstr The instruction length in bytes.
9981 * @param fIoChecked Whether the access to the I/O port has been
9982 * checked or not. It's typically checked in the
9983 * HM scenario.
9984 */
9985VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9986 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
9987{
9988 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9989
9990 /*
9991 * State init.
9992 */
9993 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9994
9995 /*
9996 * Switch orgy for getting to the right handler.
9997 */
9998 VBOXSTRICTRC rcStrict;
9999 if (fRepPrefix)
10000 {
10001 switch (enmAddrMode)
10002 {
10003 case IEMMODE_16BIT:
10004 switch (cbValue)
10005 {
10006 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10007 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10008 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10009 default:
10010 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10011 }
10012 break;
10013
10014 case IEMMODE_32BIT:
10015 switch (cbValue)
10016 {
10017 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10018 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10019 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10020 default:
10021 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10022 }
10023 break;
10024
10025 case IEMMODE_64BIT:
10026 switch (cbValue)
10027 {
10028 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10029 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10030 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10031 default:
10032 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10033 }
10034 break;
10035
10036 default:
10037 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10038 }
10039 }
10040 else
10041 {
10042 switch (enmAddrMode)
10043 {
10044 case IEMMODE_16BIT:
10045 switch (cbValue)
10046 {
10047 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10048 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10049 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10050 default:
10051 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10052 }
10053 break;
10054
10055 case IEMMODE_32BIT:
10056 switch (cbValue)
10057 {
10058 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10059 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10060 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10061 default:
10062 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10063 }
10064 break;
10065
10066 case IEMMODE_64BIT:
10067 switch (cbValue)
10068 {
10069 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10070 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10071 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10072 default:
10073 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10074 }
10075 break;
10076
10077 default:
10078 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10079 }
10080 }
10081
10082 if ( pVCpu->iem.s.cActiveMappings == 0
10083 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10084 { /* likely */ }
10085 else
10086 {
10087 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10088 iemMemRollback(pVCpu);
10089 }
10090 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10091}
10092
10093
10094/**
10095 * Interface for rawmode to execute an OUT instruction.
10096 *
10097 * @returns Strict VBox status code.
10098 * @param pVCpu The cross context virtual CPU structure.
10099 * @param cbInstr The instruction length in bytes.
10100 * @param u16Port The port to write to.
10101 * @param fImm Whether the port is specified using an immediate operand or
10102 * using the implicit DX register.
10103 * @param cbReg The register size.
10104 *
10105 * @remarks In ring-0 not all of the state needs to be synced in.
10106 */
10107VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10108{
10109 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10110 Assert(cbReg <= 4 && cbReg != 3);
10111
10112 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10113 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10114 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10115 Assert(!pVCpu->iem.s.cActiveMappings);
10116 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10117}
10118
10119
10120/**
10121 * Interface for rawmode to execute an IN instruction.
10122 *
10123 * @returns Strict VBox status code.
10124 * @param pVCpu The cross context virtual CPU structure.
10125 * @param cbInstr The instruction length in bytes.
10126 * @param u16Port The port to read.
10127 * @param fImm Whether the port is specified using an immediate operand or
10128 * using the implicit DX register.
10129 * @param cbReg The register size.
10130 */
10131VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10132{
10133 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10134 Assert(cbReg <= 4 && cbReg != 3);
10135
10136 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10137 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10138 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10139 Assert(!pVCpu->iem.s.cActiveMappings);
10140 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10141}
10142
10143
10144/**
10145 * Interface for HM and EM to write to a CRx register.
10146 *
10147 * @returns Strict VBox status code.
10148 * @param pVCpu The cross context virtual CPU structure.
10149 * @param cbInstr The instruction length in bytes.
10150 * @param iCrReg The control register number (destination).
10151 * @param iGReg The general purpose register number (source).
10152 *
10153 * @remarks In ring-0 not all of the state needs to be synced in.
10154 */
10155VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10156{
10157 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10158 Assert(iCrReg < 16);
10159 Assert(iGReg < 16);
10160
10161 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10162 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10163 Assert(!pVCpu->iem.s.cActiveMappings);
10164 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10165}
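
/*
 * Illustrative sketch only: how a VM-exit handler that has already decoded a
 * "mov cr3, rax" intercept might hand it to IEM.  The instruction length (3
 * bytes for 0F 22 D8) and register indices are assumed to come from the exit
 * information; the same pattern applies to the other IEMExecDecoded* helpers
 * below.
 *
 * @code
 *      // iCrReg=3 (CR3), iGReg=X86_GREG_xAX, cbInstr=3.
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3, 3, X86_GREG_xAX);
 * @endcode
 */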
10166
10167
10168/**
10169 * Interface for HM and EM to read from a CRx register.
10170 *
10171 * @returns Strict VBox status code.
10172 * @param pVCpu The cross context virtual CPU structure.
10173 * @param cbInstr The instruction length in bytes.
10174 * @param iGReg The general purpose register number (destination).
10175 * @param iCrReg The control register number (source).
10176 *
10177 * @remarks In ring-0 not all of the state needs to be synced in.
10178 */
10179VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10180{
10181 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10182 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10183 | CPUMCTX_EXTRN_APIC_TPR);
10184 Assert(iCrReg < 16);
10185 Assert(iGReg < 16);
10186
10187 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10188 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10189 Assert(!pVCpu->iem.s.cActiveMappings);
10190 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10191}
10192
10193
10194/**
10195 * Interface for HM and EM to write to a DRx register.
10196 *
10197 * @returns Strict VBox status code.
10198 * @param pVCpu The cross context virtual CPU structure.
10199 * @param cbInstr The instruction length in bytes.
10200 * @param iDrReg The debug register number (destination).
10201 * @param iGReg The general purpose register number (source).
10202 *
10203 * @remarks In ring-0 not all of the state needs to be synced in.
10204 */
10205VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10206{
10207 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10208 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10209 Assert(iDrReg < 8);
10210 Assert(iGReg < 16);
10211
10212 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10213 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10214 Assert(!pVCpu->iem.s.cActiveMappings);
10215 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10216}
10217
10218
10219/**
10220 * Interface for HM and EM to read from a DRx register.
10221 *
10222 * @returns Strict VBox status code.
10223 * @param pVCpu The cross context virtual CPU structure.
10224 * @param cbInstr The instruction length in bytes.
10225 * @param iGReg The general purpose register number (destination).
10226 * @param iDrReg The debug register number (source).
10227 *
10228 * @remarks In ring-0 not all of the state needs to be synced in.
10229 */
10230VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10231{
10232 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10233 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10234 Assert(iDrReg < 8);
10235 Assert(iGReg < 16);
10236
10237 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10238 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10239 Assert(!pVCpu->iem.s.cActiveMappings);
10240 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10241}
10242
10243
10244/**
10245 * Interface for HM and EM to clear the CR0[TS] bit.
10246 *
10247 * @returns Strict VBox status code.
10248 * @param pVCpu The cross context virtual CPU structure.
10249 * @param cbInstr The instruction length in bytes.
10250 *
10251 * @remarks In ring-0 not all of the state needs to be synced in.
10252 */
10253VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10254{
10255 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10256
10257 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10258 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10259 Assert(!pVCpu->iem.s.cActiveMappings);
10260 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10261}
10262
10263
10264/**
10265 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10266 *
10267 * @returns Strict VBox status code.
10268 * @param pVCpu The cross context virtual CPU structure.
10269 * @param cbInstr The instruction length in bytes.
10270 * @param uValue The value to load into CR0.
10271 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10272 * memory operand. Otherwise pass NIL_RTGCPTR.
10273 *
10274 * @remarks In ring-0 not all of the state needs to be synced in.
10275 */
10276VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10277{
10278 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10279
10280 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10281 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10282 Assert(!pVCpu->iem.s.cActiveMappings);
10283 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10284}
10285
10286
10287/**
10288 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10289 *
10290 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10291 *
10292 * @returns Strict VBox status code.
10293 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10294 * @param cbInstr The instruction length in bytes.
10295 * @remarks In ring-0 not all of the state needs to be synced in.
10296 * @thread EMT(pVCpu)
10297 */
10298VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10299{
10300 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10301
10302 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10303 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10304 Assert(!pVCpu->iem.s.cActiveMappings);
10305 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10306}
10307
10308
10309/**
10310 * Interface for HM and EM to emulate the WBINVD instruction.
10311 *
10312 * @returns Strict VBox status code.
10313 * @param pVCpu The cross context virtual CPU structure.
10314 * @param cbInstr The instruction length in bytes.
10315 *
10316 * @remarks In ring-0 not all of the state needs to be synced in.
10317 */
10318VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10319{
10320 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10321
10322 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10323 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10324 Assert(!pVCpu->iem.s.cActiveMappings);
10325 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10326}
10327
10328
10329/**
10330 * Interface for HM and EM to emulate the INVD instruction.
10331 *
10332 * @returns Strict VBox status code.
10333 * @param pVCpu The cross context virtual CPU structure.
10334 * @param cbInstr The instruction length in bytes.
10335 *
10336 * @remarks In ring-0 not all of the state needs to be synced in.
10337 */
10338VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10339{
10340 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10341
10342 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10343 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10344 Assert(!pVCpu->iem.s.cActiveMappings);
10345 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10346}
10347
10348
10349/**
10350 * Interface for HM and EM to emulate the INVLPG instruction.
10351 *
10352 * @returns Strict VBox status code.
10353 * @retval VINF_PGM_SYNC_CR3
10354 *
10355 * @param pVCpu The cross context virtual CPU structure.
10356 * @param cbInstr The instruction length in bytes.
10357 * @param GCPtrPage The effective address of the page to invalidate.
10358 *
10359 * @remarks In ring-0 not all of the state needs to be synced in.
10360 */
10361VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10362{
10363 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10364
10365 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10366 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10367 Assert(!pVCpu->iem.s.cActiveMappings);
10368 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10369}
10370
10371
10372/**
10373 * Interface for HM and EM to emulate the INVPCID instruction.
10374 *
10375 * @returns Strict VBox status code.
10376 * @retval VINF_PGM_SYNC_CR3
10377 *
10378 * @param pVCpu The cross context virtual CPU structure.
10379 * @param cbInstr The instruction length in bytes.
10380 * @param iEffSeg The effective segment register.
10381 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10382 * @param uType The invalidation type.
10383 *
10384 * @remarks In ring-0 not all of the state needs to be synced in.
10385 */
10386VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10387 uint64_t uType)
10388{
10389 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10390
10391 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10392 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10393 Assert(!pVCpu->iem.s.cActiveMappings);
10394 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10395}
10396
10397
10398/**
10399 * Interface for HM and EM to emulate the CPUID instruction.
10400 *
10401 * @returns Strict VBox status code.
10402 *
10403 * @param pVCpu The cross context virtual CPU structure.
10404 * @param cbInstr The instruction length in bytes.
10405 *
10406 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
10407 */
10408VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10409{
10410 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10411 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10412
10413 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10414 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10415 Assert(!pVCpu->iem.s.cActiveMappings);
10416 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10417}
10418
10419
10420/**
10421 * Interface for HM and EM to emulate the RDPMC instruction.
10422 *
10423 * @returns Strict VBox status code.
10424 *
10425 * @param pVCpu The cross context virtual CPU structure.
10426 * @param cbInstr The instruction length in bytes.
10427 *
10428 * @remarks Not all of the state needs to be synced in.
10429 */
10430VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10431{
10432 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10433 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10434
10435 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10436 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10437 Assert(!pVCpu->iem.s.cActiveMappings);
10438 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10439}
10440
10441
10442/**
10443 * Interface for HM and EM to emulate the RDTSC instruction.
10444 *
10445 * @returns Strict VBox status code.
10446 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10447 *
10448 * @param pVCpu The cross context virtual CPU structure.
10449 * @param cbInstr The instruction length in bytes.
10450 *
10451 * @remarks Not all of the state needs to be synced in.
10452 */
10453VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10454{
10455 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10456 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10457
10458 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10459 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10460 Assert(!pVCpu->iem.s.cActiveMappings);
10461 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10462}
10463
10464
10465/**
10466 * Interface for HM and EM to emulate the RDTSCP instruction.
10467 *
10468 * @returns Strict VBox status code.
10469 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10470 *
10471 * @param pVCpu The cross context virtual CPU structure.
10472 * @param cbInstr The instruction length in bytes.
10473 *
10474 * @remarks Not all of the state needs to be synced in. Recommended
10475 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
10476 */
10477VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10478{
10479 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10480 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10481
10482 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10483 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10484 Assert(!pVCpu->iem.s.cActiveMappings);
10485 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10486}
10487
10488
10489/**
10490 * Interface for HM and EM to emulate the RDMSR instruction.
10491 *
10492 * @returns Strict VBox status code.
10493 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10494 *
10495 * @param pVCpu The cross context virtual CPU structure.
10496 * @param cbInstr The instruction length in bytes.
10497 *
10498 * @remarks Not all of the state needs to be synced in. Requires RCX and
10499 * (currently) all MSRs.
10500 */
10501VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10502{
10503 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10504 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10505
10506 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10507 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10508 Assert(!pVCpu->iem.s.cActiveMappings);
10509 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10510}
10511
10512
10513/**
10514 * Interface for HM and EM to emulate the WRMSR instruction.
10515 *
10516 * @returns Strict VBox status code.
10517 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10518 *
10519 * @param pVCpu The cross context virtual CPU structure.
10520 * @param cbInstr The instruction length in bytes.
10521 *
10522 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10523 * and (currently) all MSRs.
10524 */
10525VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10526{
10527 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10528 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10529 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10530
10531 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10532 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10533 Assert(!pVCpu->iem.s.cActiveMappings);
10534 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10535}
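/*
 * A minimal sketch of the caller-side precondition for the RDMSR/WRMSR
 * wrappers above: RCX, RAX, RDX and the MSR state must already have been
 * imported into the guest context.  The fExtrn check shown here mirrors what
 * IEM_CTX_ASSERT verifies internally and is illustrative only.
 *
 *  // Bits set in fExtrn mean the state is still external, i.e. not yet imported.
 *  Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX)));
 *  VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, cbInstr);
 */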
10536
10537
10538/**
10539 * Interface for HM and EM to emulate the MONITOR instruction.
10540 *
10541 * @returns Strict VBox status code.
10542 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10543 *
10544 * @param pVCpu The cross context virtual CPU structure.
10545 * @param cbInstr The instruction length in bytes.
10546 *
10547 * @remarks Not all of the state needs to be synced in.
10548 * @remarks ASSUMES the default segment of DS and that no segment override prefixes
10549 * are used.
10550 */
10551VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10552{
10553 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10554 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10555
10556 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10557 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10558 Assert(!pVCpu->iem.s.cActiveMappings);
10559 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10560}
10561
10562
10563/**
10564 * Interface for HM and EM to emulate the MWAIT instruction.
10565 *
10566 * @returns Strict VBox status code.
10567 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10568 *
10569 * @param pVCpu The cross context virtual CPU structure.
10570 * @param cbInstr The instruction length in bytes.
10571 *
10572 * @remarks Not all of the state needs to be synced in.
10573 */
10574VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10575{
10576 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10577 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10578
10579 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10580 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10581 Assert(!pVCpu->iem.s.cActiveMappings);
10582 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10583}
10584
10585
10586/**
10587 * Interface for HM and EM to emulate the HLT instruction.
10588 *
10589 * @returns Strict VBox status code.
10590 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10591 *
10592 * @param pVCpu The cross context virtual CPU structure.
10593 * @param cbInstr The instruction length in bytes.
10594 *
10595 * @remarks Not all of the state needs to be synced in.
10596 */
10597VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10598{
10599 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10600
10601 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10602 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10603 Assert(!pVCpu->iem.s.cActiveMappings);
10604 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10605}
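/*
 * A minimal sketch, assuming the usual EM handling of scheduling statuses:
 * HLT emulation typically comes back with VINF_EM_HALT, which the caller
 * should hand back to its execution loop rather than swallow, so EM can
 * actually halt the virtual CPU until the next wake-up event.
 *
 *  VBOXSTRICTRC rcStrict = IEMExecDecodedHlt(pVCpu, 1); // HLT is a single byte.
 *  return rcStrict; // e.g. VINF_EM_HALT is acted upon by the EM loop.
 */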
10606
10607
10608/**
10609 * Checks if IEM is in the process of delivering an event (interrupt or
10610 * exception).
10611 *
10612 * @returns true if we're in the process of raising an interrupt or exception,
10613 * false otherwise.
10614 * @param pVCpu The cross context virtual CPU structure.
10615 * @param puVector Where to store the vector associated with the
10616 * currently delivered event, optional.
10617 * @param pfFlags    Where to store the event delivery flags (see
10618 * IEM_XCPT_FLAGS_XXX), optional.
10619 * @param puErr Where to store the error code associated with the
10620 * event, optional.
10621 * @param puCr2 Where to store the CR2 associated with the event,
10622 * optional.
10623 * @remarks The caller should check the flags to determine if the error code and
10624 * CR2 are valid for the event.
10625 */
10626VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10627{
10628 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10629 if (fRaisingXcpt)
10630 {
10631 if (puVector)
10632 *puVector = pVCpu->iem.s.uCurXcpt;
10633 if (pfFlags)
10634 *pfFlags = pVCpu->iem.s.fCurXcpt;
10635 if (puErr)
10636 *puErr = pVCpu->iem.s.uCurXcptErr;
10637 if (puCr2)
10638 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10639 }
10640 return fRaisingXcpt;
10641}
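/*
 * A minimal sketch of querying the event IEM is currently delivering.
 * IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 are assumed here to be the
 * relevant validity bits; see the IEM_XCPT_FLAGS_XXX definitions for the
 * authoritative set.
 *
 *  uint8_t  uVector = 0;
 *  uint32_t fFlags  = 0;
 *  uint32_t uErr    = 0;
 *  uint64_t uCr2    = 0;
 *  if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *  {
 *      // Only trust the error code and CR2 when the flags say they are valid.
 *      bool const fErrValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *      bool const fCr2Valid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *      Log(("IEM delivering vector %#x (err: %d, cr2: %d)\n", uVector, fErrValid, fCr2Valid));
 *  }
 */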
10642
10643#ifdef IN_RING3
10644
10645/**
10646 * Handles the unlikely and probably fatal merge cases.
10647 *
10648 * @returns Merged status code.
10649 * @param rcStrict Current EM status code.
10650 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10651 * with @a rcStrict.
10652 * @param iMemMap The memory mapping index. For error reporting only.
10653 * @param pVCpu The cross context virtual CPU structure of the calling
10654 * thread, for error reporting only.
10655 */
10656DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10657 unsigned iMemMap, PVMCPUCC pVCpu)
10658{
10659 if (RT_FAILURE_NP(rcStrict))
10660 return rcStrict;
10661
10662 if (RT_FAILURE_NP(rcStrictCommit))
10663 return rcStrictCommit;
10664
10665 if (rcStrict == rcStrictCommit)
10666 return rcStrictCommit;
10667
10668 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10669 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10670 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10671 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10672 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10673 return VERR_IOM_FF_STATUS_IPE;
10674}
10675
10676
10677/**
10678 * Helper for IOMR3ProcessForceFlag.
10679 *
10680 * @returns Merged status code.
10681 * @param rcStrict Current EM status code.
10682 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10683 * with @a rcStrict.
10684 * @param iMemMap The memory mapping index. For error reporting only.
10685 * @param pVCpu The cross context virtual CPU structure of the calling
10686 * thread, for error reporting only.
10687 */
10688DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10689{
10690 /* Simple. */
10691 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10692 return rcStrictCommit;
10693
10694 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10695 return rcStrict;
10696
10697 /* EM scheduling status codes. */
10698 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10699 && rcStrict <= VINF_EM_LAST))
10700 {
10701 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10702 && rcStrictCommit <= VINF_EM_LAST))
10703 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10704 }
10705
10706 /* Unlikely */
10707 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10708}
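/*
 * When both inputs fall in the VINF_EM_FIRST..VINF_EM_LAST range, the merge
 * above keeps the numerically lower status, which by VBox convention is the
 * higher-priority scheduling request; any other combination is punted to
 * iemR3MergeStatusSlow.
 */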
10709
10710
10711/**
10712 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10713 *
10714 * @returns Merge between @a rcStrict and what the commit operation returned.
10715 * @param pVM The cross context VM structure.
10716 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10717 * @param rcStrict The status code returned by ring-0 or raw-mode.
10718 */
10719VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10720{
10721 /*
10722 * Reset the pending commit.
10723 */
10724 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10725 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10726 ("%#x %#x %#x\n",
10727 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10728 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10729
10730 /*
10731 * Commit the pending bounce buffers (usually just one).
10732 */
10733 unsigned cBufs = 0;
10734 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10735 while (iMemMap-- > 0)
10736 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10737 {
10738 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10739 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10740 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10741
10742 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10743 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10744 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10745
10746 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10747 {
10748 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10749 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10750 pbBuf,
10751 cbFirst,
10752 PGMACCESSORIGIN_IEM);
10753 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10754 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10755 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10756 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10757 }
10758
10759 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10760 {
10761 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10762 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10763 pbBuf + cbFirst,
10764 cbSecond,
10765 PGMACCESSORIGIN_IEM);
10766 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10767 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10768 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10769 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10770 }
10771 cBufs++;
10772 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10773 }
10774
10775 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10776 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10777 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10778 pVCpu->iem.s.cActiveMappings = 0;
10779 return rcStrict;
10780}
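/*
 * A minimal sketch of the typical ring-3 call site; the surrounding loop
 * shape is an assumption for the example.  After returning from ring-0 or
 * raw-mode execution, the force-flag is checked and the routine above commits
 * the pending bounce-buffer writes, merging the resulting status codes.
 *
 *  if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *      rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */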
10781
10782#endif /* IN_RING3 */
10783