VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@103778

Last change on this file since 103778 was 103671, checked in by vboxsync, 9 months ago

VMM/IEM: Native translation of IEM_MC_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() body, bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 449.7 KB
1/* $Id: IEMAll.cpp 103671 2024-03-04 15:48:34Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
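#if 0 /* Editor's illustrative sketch (never compiled), not part of the original file:
       * how the IEM log levels documented above are typically exercised.  This assumes
       * the standard VBox logging macros from VBox/log.h and LOG_GROUP set to
       * LOG_GROUP_IEM (as this file does further down); the messages themselves are
       * made-up placeholders. */
static void iemLogLevelSketch(PVMCPUCC pVCpu)
{
    Log(("iemRaiseXcptOrInt: vec=%#x\n", 14));                 /* Level 1: exceptions and other major events. */
    LogFlow(("IEMExecOne: enter\n"));                          /* Flow:    basic enter/exit state info.        */
    Log4(("decode: %04x:%08RX64 xor eax, eax\n",
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); /* Level 4: decoded mnemonics w/ EIP.           */
    Log10(("IEMTlbInvalidateAll\n"));                          /* Level 10: TLB activity.                      */
}
#endif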
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
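#if 0 /* Editor's illustrative sketch (never compiled), not part of the original file:
       * a concrete DR7 value worked through the PROCESS_ONE_BP logic above.  The bit
       * layout is the architectural one (L0 = bit 0, R/W0 = bits 17:16); the helper
       * macros and IEM_F_PENDING_BRK_* flags are the ones used in the function. */
static uint32_t iemCalcExecDbgFlagsSketch(void)
{
    /* DR7 with L0 set and R/W0 = 01b (break on data writes), everything else clear: */
    uint32_t const fDr7 = UINT32_C(0x00000001) /* L0 */ | UINT32_C(0x00010000) /* R/W0 = 01b */;
    uint32_t fExec = 0;
    if (fDr7 & X86_DR7_L_G(0))                        /* breakpoint 0 is enabled, so examine R/W0...      */
        if (X86_DR7_GET_RW(fDr7, 0) == X86_DR7_RW_WO)
            fExec |= IEM_F_PENDING_BRK_DATA;          /* ...write-only => a data breakpoint is pending.   */
    return fExec;                                     /* == IEM_F_PENDING_BRK_DATA                        */
}
#endif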
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
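#if 0 /* Editor's illustrative sketch (never compiled), not part of the original file:
       * the instruction buffer reuse arithmetic above, worked with made-up numbers for
       * a flat 64-bit guest whose code TLB buffer still maps the page at 0x7000. */
    uint64_t const uInstrBufPc      = UINT64_C(0x7000);
    uint16_t const cbInstrBufTotal  = 0x1000;
    uint64_t const uNewRip          = UINT64_C(0x7010);
    uint64_t const off              = uNewRip - uInstrBufPc;     /* 0x10 < 0x1000: buffer is reused.       */
    uint16_t const offCurInstrStart = (uint16_t)off;             /* 0x10                                   */
    uint16_t const cbInstrBuf       = (uint16_t)off + 15 <= cbInstrBufTotal
                                    ? (uint16_t)off + 15         /* room for a maximum length instruction  */
                                    : cbInstrBufTotal;           /* else clamp to the mapped page.         */
#endif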
455
456
457
458/**
459 * Prefetches opcodes the first time, when starting execution.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
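#if 0 /* Editor's illustrative sketch (never compiled), not part of the original file:
       * the CS limit and page clamping above, worked with made-up numbers for a 32-bit
       * code segment with base 0x10000 and limit 0xffff while EIP = 0xfffa. */
    uint32_t const GCPtrPC32    = 0xfffa;                        /* EIP                                   */
    uint32_t       cbToTryRead  = 0xffff - GCPtrPC32 + 1;        /* 6 bytes left before the limit.        */
    uint32_t const GCPtrPC      = 0x10000 + GCPtrPC32;           /* linear address 0x1fffa                */
    uint32_t const cbLeftOnPage = 0x1000 - (GCPtrPC & 0xfff);    /* 6 bytes left on the 4K page.          */
    if (cbToTryRead > cbLeftOnPage)
        cbToTryRead = cbLeftOnPage;                              /* read at most 6 bytes here; the rest is
                                                                    fetched on demand later.              */
#endif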
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
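#if 0 /* Editor's illustrative sketch (never compiled), not part of the original file:
       * the lazy revision-based invalidation used above, in miniature.  Entry tags
       * store the page number OR'ed with the TLB revision, so bumping the revision
       * makes every existing tag stale without touching the array; only on the rare
       * revision rollover are the tags cleared explicitly.  IEMTLB_REVISION_INCR is
       * the real constant used above. */
    uint64_t uTlbRevision = IEMTLB_REVISION_INCR;                  /* revision lives in the high tag bits */
    uint64_t aTags[4]     = { 0 };

    /* Insert a page (page number 0x12345) under the current revision: */
    aTags[0] = UINT64_C(0x12345) | uTlbRevision;

    /* "Invalidate all": just bump the revision... */
    uTlbRevision += IEMTLB_REVISION_INCR;

    /* ...and the old entry no longer matches a lookup tag built with the new revision: */
    bool const fHit = aTags[0] == (UINT64_C(0x12345) | uTlbRevision);   /* false */
#endif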
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate.
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs the slow way following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller != pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.cbInstrBufTotal = 0;
819 RT_NOREF(cbInstr);
820#else
821 RT_NOREF(pVCpu, cbInstr);
822#endif
823}
824
825
826
827#ifdef IEM_WITH_CODE_TLB
828
829/**
830 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
831 * failure and jumping.
832 *
833 * We end up here for a number of reasons:
834 * - pbInstrBuf isn't yet initialized.
835 * - Advancing beyond the buffer boundary (e.g. cross page).
836 * - Advancing beyond the CS segment limit.
837 * - Fetching from non-mappable page (e.g. MMIO).
838 *
839 * @param pVCpu The cross context virtual CPU structure of the
840 * calling thread.
841 * @param pvDst Where to return the bytes.
842 * @param cbDst Number of bytes to read. A value of zero is
843 * allowed for initializing pbInstrBuf (the
844 * recompiler does this). In this case it is best
845 * to set pbInstrBuf to NULL prior to the call.
846 */
847void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
848{
849# ifdef IN_RING3
850 for (;;)
851 {
852 Assert(cbDst <= 8);
853 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
854
855 /*
856 * We might have a partial buffer match, deal with that first to make the
857 * rest simpler. This is the first part of the cross page/buffer case.
858 */
859 if (pVCpu->iem.s.pbInstrBuf != NULL)
860 {
861 if (offBuf < pVCpu->iem.s.cbInstrBuf)
862 {
863 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
864 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
865 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
866
867 cbDst -= cbCopy;
868 pvDst = (uint8_t *)pvDst + cbCopy;
869 offBuf += cbCopy;
870 pVCpu->iem.s.offInstrNextByte += cbCopy;
871 }
872 }
873
874 /*
875 * Check segment limit, figuring how much we're allowed to access at this point.
876 *
877 * We will fault immediately if RIP is past the segment limit / in non-canonical
878 * territory. If we do continue, there are one or more bytes to read before we
879 * end up in trouble and we need to do that first before faulting.
880 */
881 RTGCPTR GCPtrFirst;
882 uint32_t cbMaxRead;
883 if (IEM_IS_64BIT_CODE(pVCpu))
884 {
885 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
886 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
887 { /* likely */ }
888 else
889 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
890 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
891 }
892 else
893 {
894 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
895 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
896 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
897 { /* likely */ }
898 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
899 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
900 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
901 if (cbMaxRead != 0)
902 { /* likely */ }
903 else
904 {
905 /* Overflowed because address is 0 and limit is max. */
906 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
907 cbMaxRead = X86_PAGE_SIZE;
908 }
909 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
910 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
911 if (cbMaxRead2 < cbMaxRead)
912 cbMaxRead = cbMaxRead2;
913 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
914 }
915
916 /*
917 * Get the TLB entry for this piece of code.
918 */
919 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
920 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
921 if (pTlbe->uTag == uTag)
922 {
923 /* likely when executing lots of code, otherwise unlikely */
924# ifdef VBOX_WITH_STATISTICS
925 pVCpu->iem.s.CodeTlb.cTlbHits++;
926# endif
927 }
928 else
929 {
930 pVCpu->iem.s.CodeTlb.cTlbMisses++;
931 PGMPTWALK Walk;
932 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
933 if (RT_FAILURE(rc))
934 {
935#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
936 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
937 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
938#endif
939 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
940 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
941 }
942
943 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
944 Assert(Walk.fSucceeded);
945 pTlbe->uTag = uTag;
946 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
947 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
948 pTlbe->GCPhys = Walk.GCPhys;
949 pTlbe->pbMappingR3 = NULL;
950 }
951
952 /*
953 * Check TLB page table level access flags.
954 */
955 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
956 {
957 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
958 {
959 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
960 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
961 }
962 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
963 {
964 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
965 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
966 }
967 }
968
969 /*
970 * Set the accessed flags.
971 * ASSUMES this is set when the address is translated rather than on commit...
972 */
973 /** @todo testcase: check when the A bit are actually set by the CPU for code. */
974 if (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED)
975 {
976 int rc2 = PGMGstModifyPage(pVCpu, GCPtrFirst, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
977 AssertRC(rc2);
978 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
979 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
980 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_F_PT_NO_ACCESSED;
981 }
982
983 /*
984 * Look up the physical page info if necessary.
985 */
986 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
987 { /* not necessary */ }
988 else
989 {
990 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
991 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
992 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
993 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
994 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
995 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
996 { /* likely */ }
997 else
998 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
999 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1000 | IEMTLBE_F_NO_MAPPINGR3
1001 | IEMTLBE_F_PG_NO_READ
1002 | IEMTLBE_F_PG_NO_WRITE
1003 | IEMTLBE_F_PG_UNASSIGNED
1004 | IEMTLBE_F_PG_CODE_PAGE);
1005 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1006 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1007 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1008 }
1009
1010# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1011 /*
1012 * Try do a direct read using the pbMappingR3 pointer.
1013 */
1014 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1015 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1016 {
1017 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1018 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1019 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1020 {
1021 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1022 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1023 }
1024 else
1025 {
1026 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1027 if (cbInstr + (uint32_t)cbDst <= 15)
1028 {
1029 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1030 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1031 }
1032 else
1033 {
1034 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1035 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1036 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1037 }
1038 }
1039 if (cbDst <= cbMaxRead)
1040 {
1041 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1042 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1043
1044 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1045 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1046 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1047 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1048 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1049 return;
1050 }
1051 pVCpu->iem.s.pbInstrBuf = NULL;
1052
1053 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1054 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1055 }
1056# else
1057# error "refactor as needed"
1058 /*
1059 * If there is no special read handling, we can read a bit more and
1060 * put it in the prefetch buffer.
1061 */
1062 if ( cbDst < cbMaxRead
1063 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1064 {
1065 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1066 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1067 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1068 { /* likely */ }
1069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1070 {
1071 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1072 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1073 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1074 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1075 }
1076 else
1077 {
1078 Log((RT_SUCCESS(rcStrict)
1079 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1080 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1081 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1082 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1083 }
1084 }
1085# endif
1086 /*
1087 * Special read handling, so only read exactly what's needed.
1088 * This is a highly unlikely scenario.
1089 */
1090 else
1091 {
1092 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1093
1094 /* Check instruction length. */
1095 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1096 if (RT_LIKELY(cbInstr + cbDst <= 15))
1097 { /* likely */ }
1098 else
1099 {
1100 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1101 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1102 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1103 }
1104
1105 /* Do the reading. */
1106 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1107 if (cbToRead > 0)
1108 {
1109 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1110 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1111 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1112 { /* likely */ }
1113 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1114 {
1115 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1116 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1117 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1118 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1119 }
1120 else
1121 {
1122 Log((RT_SUCCESS(rcStrict)
1123 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1124 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1125 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1126 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1127 }
1128 }
1129
1130 /* Update the state and probably return. */
1131 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1132 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1133 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1134
1135 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1136 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1137 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1138 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1139 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1140 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1141 pVCpu->iem.s.pbInstrBuf = NULL;
1142 if (cbToRead == cbDst)
1143 return;
1144 }
1145
1146 /*
1147 * More to read, loop.
1148 */
1149 cbDst -= cbMaxRead;
1150 pvDst = (uint8_t *)pvDst + cbMaxRead;
1151 }
1152# else /* !IN_RING3 */
1153 RT_NOREF(pvDst, cbDst);
1154 if (pvDst || cbDst)
1155 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1156# endif /* !IN_RING3 */
1157}
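#if 0 /* Editor's illustrative sketch (never compiled), not part of the original file:
       * the partial buffer copy at the top of the loop above, worked with made-up
       * numbers for a 5 byte instruction starting 3 bytes before the end of the
       * currently mapped page (cbInstrBuf = 0x1000, offBuf = 0xffd). */
    uint8_t  abDst[5];
    void    *pvDst        = abDst;
    size_t   cbDst        = sizeof(abDst);   /* 5 bytes requested                           */
    uint32_t offBuf       = 0xffd;           /* 3 bytes left in the buffer                  */
    uint32_t const cbBuf  = 0x1000;          /* cbInstrBuf: one full page                   */

    uint32_t const cbCopy = cbBuf - offBuf;  /* 3 bytes come from the old page              */
    cbDst  -= cbCopy;                        /* 2 bytes still needed...                     */
    pvDst   = (uint8_t *)pvDst + cbCopy;
    offBuf += cbCopy;                        /* ...and are fetched from the next page via
                                                the code TLB on the next loop iteration.    */
#endif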
1158
1159#else /* !IEM_WITH_CODE_TLB */
1160
1161/**
1162 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1163 * exception if it fails.
1164 *
1165 * @returns Strict VBox status code.
1166 * @param pVCpu The cross context virtual CPU structure of the
1167 * calling thread.
1168 * @param cbMin The minimum number of bytes relative to offOpcode
1169 * that must be read.
1170 */
1171VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1172{
1173 /*
1174 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1175 *
1176 * First translate CS:rIP to a physical address.
1177 */
1178 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1179 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1180 uint8_t const cbLeft = cbOpcode - offOpcode;
1181 Assert(cbLeft < cbMin);
1182 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1183
1184 uint32_t cbToTryRead;
1185 RTGCPTR GCPtrNext;
1186 if (IEM_IS_64BIT_CODE(pVCpu))
1187 {
1188 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1189 if (!IEM_IS_CANONICAL(GCPtrNext))
1190 return iemRaiseGeneralProtectionFault0(pVCpu);
1191 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1192 }
1193 else
1194 {
1195 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1196 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1197 GCPtrNext32 += cbOpcode;
1198 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1199 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1200 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1201 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1202 if (!cbToTryRead) /* overflowed */
1203 {
1204 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1205 cbToTryRead = UINT32_MAX;
1206 /** @todo check out wrapping around the code segment. */
1207 }
1208 if (cbToTryRead < cbMin - cbLeft)
1209 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1210 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1211
1212 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1213 if (cbToTryRead > cbLeftOnPage)
1214 cbToTryRead = cbLeftOnPage;
1215 }
1216
1217 /* Restrict to opcode buffer space.
1218
1219 We're making ASSUMPTIONS here based on work done previously in
1220 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1221 be fetched in case of an instruction crossing two pages. */
1222 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1223 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1224 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1225 { /* likely */ }
1226 else
1227 {
1228 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1229 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1230 return iemRaiseGeneralProtectionFault0(pVCpu);
1231 }
1232
1233 PGMPTWALK Walk;
1234 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1235 if (RT_FAILURE(rc))
1236 {
1237 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1238#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1239 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1240 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1241#endif
1242 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1243 }
1244 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1245 {
1246 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1247#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1248 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1249 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1250#endif
1251 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1252 }
1253 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1254 {
1255 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1256#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1257 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1258 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1259#endif
1260 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1261 }
1262 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1263 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1264 /** @todo Check reserved bits and such stuff. PGM is better at doing
1265 * that, so do it when implementing the guest virtual address
1266 * TLB... */
1267
1268 /*
1269 * Read the bytes at this address.
1270 *
1271 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1272 * and since PATM should only patch the start of an instruction there
1273 * should be no need to check again here.
1274 */
1275 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1276 {
1277 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1278 cbToTryRead, PGMACCESSORIGIN_IEM);
1279 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1280 { /* likely */ }
1281 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1282 {
1283 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1284 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1285 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1286 }
1287 else
1288 {
1289 Log((RT_SUCCESS(rcStrict)
1290 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1291 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1292 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1293 return rcStrict;
1294 }
1295 }
1296 else
1297 {
1298 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1299 if (RT_SUCCESS(rc))
1300 { /* likely */ }
1301 else
1302 {
1303 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1304 return rc;
1305 }
1306 }
1307 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1308 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1309
1310 return VINF_SUCCESS;
1311}
1312
1313#endif /* !IEM_WITH_CODE_TLB */
1314#ifndef IEM_WITH_SETJMP
1315
1316/**
1317 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1318 *
1319 * @returns Strict VBox status code.
1320 * @param pVCpu The cross context virtual CPU structure of the
1321 * calling thread.
1322 * @param pb Where to return the opcode byte.
1323 */
1324VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1325{
1326 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1327 if (rcStrict == VINF_SUCCESS)
1328 {
1329 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1330 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1331 pVCpu->iem.s.offOpcode = offOpcode + 1;
1332 }
1333 else
1334 *pb = 0;
1335 return rcStrict;
1336}
1337
1338#else /* IEM_WITH_SETJMP */
1339
1340/**
1341 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1342 *
1343 * @returns The opcode byte.
1344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1345 */
1346uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1347{
1348# ifdef IEM_WITH_CODE_TLB
1349 uint8_t u8;
1350 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1351 return u8;
1352# else
1353 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1354 if (rcStrict == VINF_SUCCESS)
1355 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1356 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1357# endif
1358}
1359
1360#endif /* IEM_WITH_SETJMP */
1361
1362#ifndef IEM_WITH_SETJMP
1363
1364/**
1365 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1366 *
1367 * @returns Strict VBox status code.
1368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1369 * @param pu16 Where to return the opcode word.
1370 */
1371VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1372{
1373 uint8_t u8;
1374 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1375 if (rcStrict == VINF_SUCCESS)
1376 *pu16 = (int8_t)u8;
1377 return rcStrict;
1378}
1379
1380
1381/**
1382 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1383 *
1384 * @returns Strict VBox status code.
1385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1386 * @param pu32 Where to return the opcode dword.
1387 */
1388VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1389{
1390 uint8_t u8;
1391 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1392 if (rcStrict == VINF_SUCCESS)
1393 *pu32 = (int8_t)u8;
1394 return rcStrict;
1395}
1396
1397
1398/**
1399 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1403 * @param pu64 Where to return the opcode qword.
1404 */
1405VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1406{
1407 uint8_t u8;
1408 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1409 if (rcStrict == VINF_SUCCESS)
1410 *pu64 = (int8_t)u8;
1411 return rcStrict;
1412}
1413
1414#endif /* !IEM_WITH_SETJMP */
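#if 0 /* Editor's illustrative sketch (never compiled), not part of the original file:
       * what the S8Sx ("sign extend from a signed byte") helpers above produce for the
       * opcode byte 0xfe, i.e. -2. */
    uint8_t  const u8  = 0xfe;
    uint16_t const u16 = (uint16_t)(int8_t)u8;      /* 0xfffe             */
    uint32_t const u32 = (uint32_t)(int8_t)u8;      /* 0xfffffffe         */
    uint64_t const u64 = (uint64_t)(int8_t)u8;      /* 0xfffffffffffffffe */
#endif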
1415
1416
1417#ifndef IEM_WITH_SETJMP
1418
1419/**
1420 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1421 *
1422 * @returns Strict VBox status code.
1423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1424 * @param pu16 Where to return the opcode word.
1425 */
1426VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1427{
1428 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1429 if (rcStrict == VINF_SUCCESS)
1430 {
1431 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1432# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1433 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1434# else
1435 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1436# endif
1437 pVCpu->iem.s.offOpcode = offOpcode + 2;
1438 }
1439 else
1440 *pu16 = 0;
1441 return rcStrict;
1442}
1443
1444#else /* IEM_WITH_SETJMP */
1445
1446/**
1447 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1448 *
1449 * @returns The opcode word.
1450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1451 */
1452uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1453{
1454# ifdef IEM_WITH_CODE_TLB
1455 uint16_t u16;
1456 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1457 return u16;
1458# else
1459 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1460 if (rcStrict == VINF_SUCCESS)
1461 {
1462 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1463 pVCpu->iem.s.offOpcode += 2;
1464# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1465 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1466# else
1467 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1468# endif
1469 }
1470 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1471# endif
1472}
1473
1474#endif /* IEM_WITH_SETJMP */
1475
1476#ifndef IEM_WITH_SETJMP
1477
1478/**
1479 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1480 *
1481 * @returns Strict VBox status code.
1482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1483 * @param pu32 Where to return the opcode double word.
1484 */
1485VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1486{
1487 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1491 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1492 pVCpu->iem.s.offOpcode = offOpcode + 2;
1493 }
1494 else
1495 *pu32 = 0;
1496 return rcStrict;
1497}
1498
1499
1500/**
1501 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1502 *
1503 * @returns Strict VBox status code.
1504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1505 * @param pu64 Where to return the opcode quad word.
1506 */
1507VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1508{
1509 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1510 if (rcStrict == VINF_SUCCESS)
1511 {
1512 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1513 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1514 pVCpu->iem.s.offOpcode = offOpcode + 2;
1515 }
1516 else
1517 *pu64 = 0;
1518 return rcStrict;
1519}
1520
1521#endif /* !IEM_WITH_SETJMP */
1522
1523#ifndef IEM_WITH_SETJMP
1524
1525/**
1526 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1527 *
1528 * @returns Strict VBox status code.
1529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1530 * @param pu32 Where to return the opcode dword.
1531 */
1532VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1533{
1534 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1535 if (rcStrict == VINF_SUCCESS)
1536 {
1537 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1538# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1539 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1540# else
1541 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1542 pVCpu->iem.s.abOpcode[offOpcode + 1],
1543 pVCpu->iem.s.abOpcode[offOpcode + 2],
1544 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1545# endif
1546 pVCpu->iem.s.offOpcode = offOpcode + 4;
1547 }
1548 else
1549 *pu32 = 0;
1550 return rcStrict;
1551}
1552
1553#else /* IEM_WITH_SETJMP */
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1557 *
1558 * @returns The opcode dword.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 */
1561uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1562{
1563# ifdef IEM_WITH_CODE_TLB
1564 uint32_t u32;
1565 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1566 return u32;
1567# else
1568 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1569 if (rcStrict == VINF_SUCCESS)
1570 {
1571 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1572 pVCpu->iem.s.offOpcode = offOpcode + 4;
1573# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1574 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1575# else
1576 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1577 pVCpu->iem.s.abOpcode[offOpcode + 1],
1578 pVCpu->iem.s.abOpcode[offOpcode + 2],
1579 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1580# endif
1581 }
1582 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1583# endif
1584}
1585
1586#endif /* IEM_WITH_SETJMP */
1587
1588#ifndef IEM_WITH_SETJMP
1589
1590/**
1591 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1592 *
1593 * @returns Strict VBox status code.
1594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1595 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1596 */
1597VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1598{
1599 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1600 if (rcStrict == VINF_SUCCESS)
1601 {
1602 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1603 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1604 pVCpu->iem.s.abOpcode[offOpcode + 1],
1605 pVCpu->iem.s.abOpcode[offOpcode + 2],
1606 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1607 pVCpu->iem.s.offOpcode = offOpcode + 4;
1608 }
1609 else
1610 *pu64 = 0;
1611 return rcStrict;
1612}
1613
1614
1615/**
1616 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1617 *
1618 * @returns Strict VBox status code.
1619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1620 * @param pu64 Where to return the opcode dword, sign extended to a qword.
1621 */
1622VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1623{
1624 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1625 if (rcStrict == VINF_SUCCESS)
1626 {
1627 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1628 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1629 pVCpu->iem.s.abOpcode[offOpcode + 1],
1630 pVCpu->iem.s.abOpcode[offOpcode + 2],
1631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1632 pVCpu->iem.s.offOpcode = offOpcode + 4;
1633 }
1634 else
1635 *pu64 = 0;
1636 return rcStrict;
1637}
1638
1639#endif /* !IEM_WITH_SETJMP */
1640
1641#ifndef IEM_WITH_SETJMP
1642
1643/**
1644 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1645 *
1646 * @returns Strict VBox status code.
1647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1648 * @param pu64 Where to return the opcode qword.
1649 */
1650VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1651{
1652 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1653 if (rcStrict == VINF_SUCCESS)
1654 {
1655 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1656# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1657 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1658# else
1659 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1660 pVCpu->iem.s.abOpcode[offOpcode + 1],
1661 pVCpu->iem.s.abOpcode[offOpcode + 2],
1662 pVCpu->iem.s.abOpcode[offOpcode + 3],
1663 pVCpu->iem.s.abOpcode[offOpcode + 4],
1664 pVCpu->iem.s.abOpcode[offOpcode + 5],
1665 pVCpu->iem.s.abOpcode[offOpcode + 6],
1666 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1667# endif
1668 pVCpu->iem.s.offOpcode = offOpcode + 8;
1669 }
1670 else
1671 *pu64 = 0;
1672 return rcStrict;
1673}
1674
1675#else /* IEM_WITH_SETJMP */
1676
1677/**
1678 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1679 *
1680 * @returns The opcode qword.
1681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1682 */
1683uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1684{
1685# ifdef IEM_WITH_CODE_TLB
1686 uint64_t u64;
1687 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1688 return u64;
1689# else
1690 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1691 if (rcStrict == VINF_SUCCESS)
1692 {
1693 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1694 pVCpu->iem.s.offOpcode = offOpcode + 8;
1695# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1696 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1697# else
1698 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1699 pVCpu->iem.s.abOpcode[offOpcode + 1],
1700 pVCpu->iem.s.abOpcode[offOpcode + 2],
1701 pVCpu->iem.s.abOpcode[offOpcode + 3],
1702 pVCpu->iem.s.abOpcode[offOpcode + 4],
1703 pVCpu->iem.s.abOpcode[offOpcode + 5],
1704 pVCpu->iem.s.abOpcode[offOpcode + 6],
1705 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1706# endif
1707 }
1708 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1709# endif
1710}
1711
1712#endif /* IEM_WITH_SETJMP */
1713
1714
1715
1716/** @name Misc Worker Functions.
1717 * @{
1718 */
1719
1720/**
1721 * Gets the exception class for the specified exception vector.
1722 *
1723 * @returns The class of the specified exception.
1724 * @param uVector The exception vector.
1725 */
1726static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1727{
1728 Assert(uVector <= X86_XCPT_LAST);
1729 switch (uVector)
1730 {
1731 case X86_XCPT_DE:
1732 case X86_XCPT_TS:
1733 case X86_XCPT_NP:
1734 case X86_XCPT_SS:
1735 case X86_XCPT_GP:
1736 case X86_XCPT_SX: /* AMD only */
1737 return IEMXCPTCLASS_CONTRIBUTORY;
1738
1739 case X86_XCPT_PF:
1740 case X86_XCPT_VE: /* Intel only */
1741 return IEMXCPTCLASS_PAGE_FAULT;
1742
1743 case X86_XCPT_DF:
1744 return IEMXCPTCLASS_DOUBLE_FAULT;
1745 }
1746 return IEMXCPTCLASS_BENIGN;
1747}
1748
1749
1750/**
1751 * Evaluates how to handle an exception caused during delivery of another event
1752 * (exception / interrupt).
1753 *
1754 * @returns How to handle the recursive exception.
1755 * @param pVCpu The cross context virtual CPU structure of the
1756 * calling thread.
1757 * @param fPrevFlags The flags of the previous event.
1758 * @param uPrevVector The vector of the previous event.
1759 * @param fCurFlags The flags of the current exception.
1760 * @param uCurVector The vector of the current exception.
1761 * @param pfXcptRaiseInfo Where to store additional information about the
1762 * exception condition. Optional.
1763 */
1764VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1765 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1766{
1767 /*
1768 * Only CPU exceptions can be raised while delivering other events, software interrupt
1769 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1770 */
1771 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1772 Assert(pVCpu); RT_NOREF(pVCpu);
1773 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1774
1775 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1776 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
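    /* Classify the two events: page-fault/contributory combinations escalate to #DF, and a further contributory or
       page fault while delivering #DF means a triple fault (the usual Intel/AMD double fault rules). */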
1777 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1778 {
1779 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1780 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1781 {
1782 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1783 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1784 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1785 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1786 {
1787 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1788 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1789 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1790 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1791 uCurVector, pVCpu->cpum.GstCtx.cr2));
1792 }
1793 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1794 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1795 {
1796 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1797 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1798 }
1799 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1800 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1801 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1802 {
1803 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1804 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1805 }
1806 }
1807 else
1808 {
1809 if (uPrevVector == X86_XCPT_NMI)
1810 {
1811 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1812 if (uCurVector == X86_XCPT_PF)
1813 {
1814 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1815 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1816 }
1817 }
1818 else if ( uPrevVector == X86_XCPT_AC
1819 && uCurVector == X86_XCPT_AC)
1820 {
1821 enmRaise = IEMXCPTRAISE_CPU_HANG;
1822 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1823 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1824 }
1825 }
1826 }
1827 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1828 {
1829 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1830 if (uCurVector == X86_XCPT_PF)
1831 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1832 }
1833 else
1834 {
1835 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1836 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1837 }
1838
1839 if (pfXcptRaiseInfo)
1840 *pfXcptRaiseInfo = fRaiseInfo;
1841 return enmRaise;
1842}
1843
1844
1845/**
1846 * Enters the CPU shutdown state initiated by a triple fault or other
1847 * unrecoverable conditions.
1848 *
1849 * @returns Strict VBox status code.
1850 * @param pVCpu The cross context virtual CPU structure of the
1851 * calling thread.
1852 */
1853static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1854{
1855 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1856 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1857
1858 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1859 {
1860 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1861 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1862 }
1863
1864 RT_NOREF(pVCpu);
1865 return VINF_EM_TRIPLE_FAULT;
1866}
1867
1868
1869/**
1870 * Validates a new SS segment.
1871 *
1872 * @returns VBox strict status code.
1873 * @param pVCpu The cross context virtual CPU structure of the
1874 * calling thread.
1875 * @param NewSS The new SS selector.
1876 * @param uCpl The CPL to load the stack for.
1877 * @param pDesc Where to return the descriptor.
1878 */
1879static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1880{
1881 /* Null selectors are not allowed (we're not called for dispatching
1882 interrupts with SS=0 in long mode). */
1883 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1884 {
1885 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1886 return iemRaiseTaskSwitchFault0(pVCpu);
1887 }
1888
1889 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1890 if ((NewSS & X86_SEL_RPL) != uCpl)
1891 {
1892 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1893 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1894 }
1895
1896 /*
1897 * Read the descriptor.
1898 */
1899 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1900 if (rcStrict != VINF_SUCCESS)
1901 return rcStrict;
1902
1903 /*
1904 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1905 */
1906 if (!pDesc->Legacy.Gen.u1DescType)
1907 {
1908 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1909 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1910 }
1911
1912 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1913 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1916 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1917 }
1918 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1919 {
1920 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1921 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1922 }
1923
1924 /* Is it there? */
1925 /** @todo testcase: Is this checked before the canonical / limit check below? */
1926 if (!pDesc->Legacy.Gen.u1Present)
1927 {
1928 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1929 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1930 }
1931
1932 return VINF_SUCCESS;
1933}
1934
1935/** @} */
1936
1937
1938/** @name Raising Exceptions.
1939 *
1940 * @{
1941 */
1942
1943
1944/**
1945 * Loads the specified stack far pointer from the TSS.
1946 *
1947 * @returns VBox strict status code.
1948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1949 * @param uCpl The CPL to load the stack for.
1950 * @param pSelSS Where to return the new stack segment.
1951 * @param puEsp Where to return the new stack pointer.
1952 */
1953static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1954{
1955 VBOXSTRICTRC rcStrict;
1956 Assert(uCpl < 4);
1957
1958 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1959 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1960 {
1961 /*
1962 * 16-bit TSS (X86TSS16).
1963 */
1964 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1965 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1966 {
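        /* The 16-bit TSS stores one SP:SS pair (2+2 bytes) per privilege level, starting at offset 2. */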
1967 uint32_t off = uCpl * 4 + 2;
1968 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1969 {
1970 /** @todo check actual access pattern here. */
1971 uint32_t u32Tmp = 0; /* gcc maybe... */
1972 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1973 if (rcStrict == VINF_SUCCESS)
1974 {
1975 *puEsp = RT_LOWORD(u32Tmp);
1976 *pSelSS = RT_HIWORD(u32Tmp);
1977 return VINF_SUCCESS;
1978 }
1979 }
1980 else
1981 {
1982 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1983 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1984 }
1985 break;
1986 }
1987
1988 /*
1989 * 32-bit TSS (X86TSS32).
1990 */
1991 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1992 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1993 {
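        /* The 32-bit TSS stores one ESP:SS pair (4+4 bytes) per privilege level, starting at offset 4. */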
1994 uint32_t off = uCpl * 8 + 4;
1995 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1996 {
1997/** @todo check actual access pattern here. */
1998 uint64_t u64Tmp;
1999 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2000 if (rcStrict == VINF_SUCCESS)
2001 {
2002 *puEsp = u64Tmp & UINT32_MAX;
2003 *pSelSS = (RTSEL)(u64Tmp >> 32);
2004 return VINF_SUCCESS;
2005 }
2006 }
2007 else
2008 {
2009 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2010 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2011 }
2012 break;
2013 }
2014
2015 default:
2016 AssertFailed();
2017 rcStrict = VERR_IEM_IPE_4;
2018 break;
2019 }
2020
2021 *puEsp = 0; /* make gcc happy */
2022 *pSelSS = 0; /* make gcc happy */
2023 return rcStrict;
2024}
2025
2026
2027/**
2028 * Loads the specified stack pointer from the 64-bit TSS.
2029 *
2030 * @returns VBox strict status code.
2031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2032 * @param uCpl The CPL to load the stack for.
2033 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2034 * @param puRsp Where to return the new stack pointer.
2035 */
2036static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2037{
2038 Assert(uCpl < 4);
2039 Assert(uIst < 8);
2040 *puRsp = 0; /* make gcc happy */
2041
2042 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2043 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2044
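    /* The 64-bit TSS holds RSP0..RSP2 followed by IST1..IST7; uIst == 0 selects the RSP entry for the
       requested CPL, a non-zero uIst selects the corresponding IST entry. */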
2045 uint32_t off;
2046 if (uIst)
2047 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2048 else
2049 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2050 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2051 {
2052 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2053 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2054 }
2055
2056 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2057}
2058
2059
2060/**
2061 * Adjust the CPU state according to the exception being raised.
2062 *
2063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2064 * @param u8Vector The exception that has been raised.
2065 */
2066DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2067{
2068 switch (u8Vector)
2069 {
2070 case X86_XCPT_DB:
2071 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2072 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2073 break;
2074 /** @todo Read the AMD and Intel exception reference... */
2075 }
2076}
2077
2078
2079/**
2080 * Implements exceptions and interrupts for real mode.
2081 *
2082 * @returns VBox strict status code.
2083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2084 * @param cbInstr The number of bytes to offset rIP by in the return
2085 * address.
2086 * @param u8Vector The interrupt / exception vector number.
2087 * @param fFlags The flags.
2088 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2089 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2090 */
2091static VBOXSTRICTRC
2092iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2093 uint8_t cbInstr,
2094 uint8_t u8Vector,
2095 uint32_t fFlags,
2096 uint16_t uErr,
2097 uint64_t uCr2) RT_NOEXCEPT
2098{
2099 NOREF(uErr); NOREF(uCr2);
2100 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2101
2102 /*
2103 * Read the IDT entry.
2104 */
2105 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2106 {
2107 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2108 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2109 }
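    /* A real-mode IDT entry is a 4-byte IP:CS far pointer, so the handler address lives at IDTR.base + vector * 4. */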
2110 RTFAR16 Idte;
2111 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2112 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2113 {
2114 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2115 return rcStrict;
2116 }
2117
2118#ifdef LOG_ENABLED
2119 /* If it's a software interrupt, try to decode it if logging is enabled and such. */
2120 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2121 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2122 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2123#endif
2124
2125 /*
2126 * Push the stack frame.
2127 */
2128 uint8_t bUnmapInfo;
2129 uint16_t *pu16Frame;
2130 uint64_t uNewRsp;
2131 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2132 if (rcStrict != VINF_SUCCESS)
2133 return rcStrict;
2134
2135 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2136#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2137 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2138 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2139 fEfl |= UINT16_C(0xf000);
2140#endif
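    /* Real-mode exception frame: FLAGS at the highest address, then CS, with the return IP at the new SP. */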
2141 pu16Frame[2] = (uint16_t)fEfl;
2142 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2143 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2144 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2145 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2146 return rcStrict;
2147
2148 /*
2149 * Load the vector address into cs:ip and make exception specific state
2150 * adjustments.
2151 */
2152 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2153 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2154 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2155 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2156 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2157 pVCpu->cpum.GstCtx.rip = Idte.off;
2158 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2159 IEMMISC_SET_EFL(pVCpu, fEfl);
2160
2161 /** @todo do we actually do this in real mode? */
2162 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2163 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2164
2165 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2166 so best leave them alone in case we're in a weird kind of real mode... */
2167
2168 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2169}
2170
2171
2172/**
2173 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2174 *
2175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2176 * @param pSReg Pointer to the segment register.
2177 */
2178DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2179{
2180 pSReg->Sel = 0;
2181 pSReg->ValidSel = 0;
2182 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2183 {
2184 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2185 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2186 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2187 }
2188 else
2189 {
2190 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2191 /** @todo check this on AMD-V */
2192 pSReg->u64Base = 0;
2193 pSReg->u32Limit = 0;
2194 }
2195}
2196
2197
2198/**
2199 * Loads a segment selector during a task switch in V8086 mode.
2200 *
2201 * @param pSReg Pointer to the segment register.
2202 * @param uSel The selector value to load.
2203 */
2204DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2205{
2206 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2207 pSReg->Sel = uSel;
2208 pSReg->ValidSel = uSel;
2209 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2210 pSReg->u64Base = uSel << 4;
2211 pSReg->u32Limit = 0xffff;
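    /* 0xf3 = present, DPL 3, accessed read/write data segment. */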
2212 pSReg->Attr.u = 0xf3;
2213}
2214
2215
2216/**
2217 * Loads a segment selector during a task switch in protected mode.
2218 *
2219 * In this task switch scenario, we would throw \#TS exceptions rather than
2220 * \#GPs.
2221 *
2222 * @returns VBox strict status code.
2223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2224 * @param pSReg Pointer to the segment register.
2225 * @param uSel The new selector value.
2226 *
2227 * @remarks This does _not_ handle CS or SS.
2228 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2229 */
2230static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2231{
2232 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2233
2234 /* Null data selector. */
2235 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2236 {
2237 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2239 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2240 return VINF_SUCCESS;
2241 }
2242
2243 /* Fetch the descriptor. */
2244 IEMSELDESC Desc;
2245 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2246 if (rcStrict != VINF_SUCCESS)
2247 {
2248 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2249 VBOXSTRICTRC_VAL(rcStrict)));
2250 return rcStrict;
2251 }
2252
2253 /* Must be a data segment or readable code segment. */
2254 if ( !Desc.Legacy.Gen.u1DescType
2255 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2256 {
2257 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2258 Desc.Legacy.Gen.u4Type));
2259 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2260 }
2261
2262 /* Check privileges for data segments and non-conforming code segments. */
2263 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2264 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2265 {
2266 /* The RPL and the new CPL must be less than or equal to the DPL. */
2267 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2268 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2269 {
2270 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2271 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2272 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2273 }
2274 }
2275
2276 /* Is it there? */
2277 if (!Desc.Legacy.Gen.u1Present)
2278 {
2279 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2280 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2281 }
2282
2283 /* The base and limit. */
2284 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2285 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2286
2287 /*
2288 * Ok, everything checked out fine. Now set the accessed bit before
2289 * committing the result into the registers.
2290 */
2291 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2292 {
2293 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2294 if (rcStrict != VINF_SUCCESS)
2295 return rcStrict;
2296 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2297 }
2298
2299 /* Commit */
2300 pSReg->Sel = uSel;
2301 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2302 pSReg->u32Limit = cbLimit;
2303 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2304 pSReg->ValidSel = uSel;
2305 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2306 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2307 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2308
2309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2310 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2311 return VINF_SUCCESS;
2312}
2313
2314
2315/**
2316 * Performs a task switch.
2317 *
2318 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2319 * caller is responsible for performing the necessary checks (like DPL, TSS
2320 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2321 * reference for JMP, CALL, IRET.
2322 *
2323 * If the task switch is due to a software interrupt or hardware exception,
2324 * the caller is responsible for validating the TSS selector and descriptor. See
2325 * Intel Instruction reference for INT n.
2326 *
2327 * @returns VBox strict status code.
2328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2329 * @param enmTaskSwitch The cause of the task switch.
2330 * @param uNextEip The EIP effective after the task switch.
2331 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2332 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2333 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2334 * @param SelTss The TSS selector of the new task.
2335 * @param pNewDescTss Pointer to the new TSS descriptor.
2336 */
2337VBOXSTRICTRC
2338iemTaskSwitch(PVMCPUCC pVCpu,
2339 IEMTASKSWITCH enmTaskSwitch,
2340 uint32_t uNextEip,
2341 uint32_t fFlags,
2342 uint16_t uErr,
2343 uint64_t uCr2,
2344 RTSEL SelTss,
2345 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2346{
2347 Assert(!IEM_IS_REAL_MODE(pVCpu));
2348 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2349 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2350
2351 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2352 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2353 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2354 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2355 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2356
2357 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2358 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2359
2360 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2361 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2362
2363 /* Update CR2 in case it's a page-fault. */
2364 /** @todo This should probably be done much earlier in IEM/PGM. See
2365 * @bugref{5653#c49}. */
2366 if (fFlags & IEM_XCPT_FLAGS_CR2)
2367 pVCpu->cpum.GstCtx.cr2 = uCr2;
2368
2369 /*
2370 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2371 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2372 */
2373 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2374 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2375 if (uNewTssLimit < uNewTssLimitMin)
2376 {
2377 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2378 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2379 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2380 }
2381
2382 /*
2383 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2384 * The new TSS must have been read and validated (DPL, limits etc.) before a
2385 * task-switch VM-exit commences.
2386 *
2387 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2388 */
2389 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2390 {
2391 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2392 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2393 }
2394
2395 /*
2396 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2397 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2398 */
2399 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2400 {
2401 uint64_t const uExitInfo1 = SelTss;
2402 uint64_t uExitInfo2 = uErr;
2403 switch (enmTaskSwitch)
2404 {
2405 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2406 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2407 default: break;
2408 }
2409 if (fFlags & IEM_XCPT_FLAGS_ERR)
2410 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2411 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2412 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2413
2414 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2415 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2416 RT_NOREF2(uExitInfo1, uExitInfo2);
2417 }
2418
2419 /*
2420 * Check the current TSS limit. The last written byte to the current TSS during the
2421 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2422 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2423 *
2424 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2425 * end up with smaller than "legal" TSS limits.
2426 */
2427 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2428 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2429 if (uCurTssLimit < uCurTssLimitMin)
2430 {
2431 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2432 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2433 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2434 }
2435
2436 /*
2437 * Verify that the new TSS can be accessed and map it. Map only the required contents
2438 * and not the entire TSS.
2439 */
2440 uint8_t bUnmapInfoNewTss;
2441 void *pvNewTss;
2442 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2443 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2444 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2445 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2446 * not perform correct translation if this happens. See Intel spec. 7.2.1
2447 * "Task-State Segment". */
2448 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2449/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2450 * Consider wrapping the remainder into a function for simpler cleanup. */
2451 if (rcStrict != VINF_SUCCESS)
2452 {
2453 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2454 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2455 return rcStrict;
2456 }
2457
2458 /*
2459 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2460 */
2461 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2462 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2463 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2464 {
2465 uint8_t bUnmapInfoDescCurTss;
2466 PX86DESC pDescCurTss;
2467 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2468 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2469 if (rcStrict != VINF_SUCCESS)
2470 {
2471 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2472 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2473 return rcStrict;
2474 }
2475
2476 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2477 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2478 if (rcStrict != VINF_SUCCESS)
2479 {
2480 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2481 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2482 return rcStrict;
2483 }
2484
2485 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2486 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2487 {
2488 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2489 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2490 fEFlags &= ~X86_EFL_NT;
2491 }
2492 }
2493
2494 /*
2495 * Save the CPU state into the current TSS.
2496 */
2497 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2498 if (GCPtrNewTss == GCPtrCurTss)
2499 {
2500 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2501 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2502 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2503 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2504 pVCpu->cpum.GstCtx.ldtr.Sel));
2505 }
2506 if (fIsNewTss386)
2507 {
2508 /*
2509 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2510 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2511 */
2512 uint8_t bUnmapInfoCurTss32;
2513 void *pvCurTss32;
2514 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2515 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2516 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2517 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2518 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2519 if (rcStrict != VINF_SUCCESS)
2520 {
2521 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2522 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2523 return rcStrict;
2524 }
2525
2526 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..cbCurTss). */
2527 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2528 pCurTss32->eip = uNextEip;
2529 pCurTss32->eflags = fEFlags;
2530 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2531 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2532 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2533 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2534 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2535 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2536 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2537 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2538 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2539 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2540 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2541 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2542 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2543 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2544
2545 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2546 if (rcStrict != VINF_SUCCESS)
2547 {
2548 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2549 VBOXSTRICTRC_VAL(rcStrict)));
2550 return rcStrict;
2551 }
2552 }
2553 else
2554 {
2555 /*
2556 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2557 */
2558 uint8_t bUnmapInfoCurTss16;
2559 void *pvCurTss16;
2560 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2561 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2562 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2563 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2564 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2565 if (rcStrict != VINF_SUCCESS)
2566 {
2567 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2568 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2569 return rcStrict;
2570 }
2571
2572 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..cbCurTss). */
2573 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2574 pCurTss16->ip = uNextEip;
2575 pCurTss16->flags = (uint16_t)fEFlags;
2576 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2577 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2578 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2579 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2580 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2581 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2582 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2583 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2584 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2585 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2586 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2587 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2588
2589 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2590 if (rcStrict != VINF_SUCCESS)
2591 {
2592 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2593 VBOXSTRICTRC_VAL(rcStrict)));
2594 return rcStrict;
2595 }
2596 }
2597
2598 /*
2599 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2600 */
2601 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2602 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2603 {
2604 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2605 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2606 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2607 }
2608
2609 /*
2610 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2611 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2612 */
2613 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2614 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2615 bool fNewDebugTrap;
2616 if (fIsNewTss386)
2617 {
2618 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2619 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2620 uNewEip = pNewTss32->eip;
2621 uNewEflags = pNewTss32->eflags;
2622 uNewEax = pNewTss32->eax;
2623 uNewEcx = pNewTss32->ecx;
2624 uNewEdx = pNewTss32->edx;
2625 uNewEbx = pNewTss32->ebx;
2626 uNewEsp = pNewTss32->esp;
2627 uNewEbp = pNewTss32->ebp;
2628 uNewEsi = pNewTss32->esi;
2629 uNewEdi = pNewTss32->edi;
2630 uNewES = pNewTss32->es;
2631 uNewCS = pNewTss32->cs;
2632 uNewSS = pNewTss32->ss;
2633 uNewDS = pNewTss32->ds;
2634 uNewFS = pNewTss32->fs;
2635 uNewGS = pNewTss32->gs;
2636 uNewLdt = pNewTss32->selLdt;
2637 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2638 }
2639 else
2640 {
2641 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2642 uNewCr3 = 0;
2643 uNewEip = pNewTss16->ip;
2644 uNewEflags = pNewTss16->flags;
2645 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2646 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2647 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2648 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2649 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2650 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2651 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2652 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2653 uNewES = pNewTss16->es;
2654 uNewCS = pNewTss16->cs;
2655 uNewSS = pNewTss16->ss;
2656 uNewDS = pNewTss16->ds;
2657 uNewFS = 0;
2658 uNewGS = 0;
2659 uNewLdt = pNewTss16->selLdt;
2660 fNewDebugTrap = false;
2661 }
2662
2663 if (GCPtrNewTss == GCPtrCurTss)
2664 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2665 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2666
2667 /*
2668 * We're done accessing the new TSS.
2669 */
2670 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2671 if (rcStrict != VINF_SUCCESS)
2672 {
2673 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2674 return rcStrict;
2675 }
2676
2677 /*
2678 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2679 */
2680 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2681 {
2682 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2683 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2684 if (rcStrict != VINF_SUCCESS)
2685 {
2686 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2687 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2688 return rcStrict;
2689 }
2690
2691 /* Check that the descriptor indicates the new TSS is available (not busy). */
2692 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2693 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2694 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2695
2696 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2697 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2698 if (rcStrict != VINF_SUCCESS)
2699 {
2700 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2701 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2702 return rcStrict;
2703 }
2704 }
2705
2706 /*
2707 * From this point on, we're technically in the new task. We will defer exceptions
2708 * until the completion of the task switch but before executing any instructions in the new task.
2709 */
2710 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2711 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2712 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2713 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2714 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2715 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2716 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2717
2718 /* Set the busy bit in TR. */
2719 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2720
2721 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2722 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2723 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2724 {
2725 uNewEflags |= X86_EFL_NT;
2726 }
2727
2728 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2729 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2730 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2731
2732 pVCpu->cpum.GstCtx.eip = uNewEip;
2733 pVCpu->cpum.GstCtx.eax = uNewEax;
2734 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2735 pVCpu->cpum.GstCtx.edx = uNewEdx;
2736 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2737 pVCpu->cpum.GstCtx.esp = uNewEsp;
2738 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2739 pVCpu->cpum.GstCtx.esi = uNewEsi;
2740 pVCpu->cpum.GstCtx.edi = uNewEdi;
2741
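    /* Only the architecturally defined EFLAGS bits are loaded from the TSS; the always-one bit (bit 1) is forced. */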
2742 uNewEflags &= X86_EFL_LIVE_MASK;
2743 uNewEflags |= X86_EFL_RA1_MASK;
2744 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2745
2746 /*
2747 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2748 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2749 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2750 */
2751 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2752 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2753
2754 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2755 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2756
2757 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2758 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2759
2760 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2761 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2762
2763 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2764 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2765
2766 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2767 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2768 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2769
2770 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2771 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2772 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2773 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2774
2775 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2776 {
2777 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2778 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2779 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2780 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2781 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2782 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2783 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2784 }
2785
2786 /*
2787 * Switch CR3 for the new task.
2788 */
2789 if ( fIsNewTss386
2790 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2791 {
2792 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2793 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2794 AssertRCSuccessReturn(rc, rc);
2795
2796 /* Inform PGM. */
2797 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2798 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2799 AssertRCReturn(rc, rc);
2800 /* ignore informational status codes */
2801
2802 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2803 }
2804
2805 /*
2806 * Switch LDTR for the new task.
2807 */
2808 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2809 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2810 else
2811 {
2812 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2813
2814 IEMSELDESC DescNewLdt;
2815 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2816 if (rcStrict != VINF_SUCCESS)
2817 {
2818 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2819 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2820 return rcStrict;
2821 }
2822 if ( !DescNewLdt.Legacy.Gen.u1Present
2823 || DescNewLdt.Legacy.Gen.u1DescType
2824 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2825 {
2826 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2827 uNewLdt, DescNewLdt.Legacy.u));
2828 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2829 }
2830
2831 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2832 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2833 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2834 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2835 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2836 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2837 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2839 }
2840
2841 IEMSELDESC DescSS;
2842 if (IEM_IS_V86_MODE(pVCpu))
2843 {
2844 IEM_SET_CPL(pVCpu, 3);
2845 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2846 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2847 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2848 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2849 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2850 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2851
2852 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2853 DescSS.Legacy.u = 0;
2854 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2855 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2856 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2857 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2858 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2859 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2860 DescSS.Legacy.Gen.u2Dpl = 3;
2861 }
2862 else
2863 {
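        /* Outside V8086 mode the CPL after the task switch comes from the RPL of the incoming CS selector. */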
2864 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2865
2866 /*
2867 * Load the stack segment for the new task.
2868 */
2869 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2870 {
2871 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2872 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2873 }
2874
2875 /* Fetch the descriptor. */
2876 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2877 if (rcStrict != VINF_SUCCESS)
2878 {
2879 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2880 VBOXSTRICTRC_VAL(rcStrict)));
2881 return rcStrict;
2882 }
2883
2884 /* SS must be a data segment and writable. */
2885 if ( !DescSS.Legacy.Gen.u1DescType
2886 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2887 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2888 {
2889 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2890 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2891 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2892 }
2893
2894 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2895 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2896 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2897 {
2898 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2899 uNewCpl));
2900 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2901 }
2902
2903 /* Is it there? */
2904 if (!DescSS.Legacy.Gen.u1Present)
2905 {
2906 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2907 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2908 }
2909
2910 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2911 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2912
2913 /* Set the accessed bit before committing the result into SS. */
2914 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2915 {
2916 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2917 if (rcStrict != VINF_SUCCESS)
2918 return rcStrict;
2919 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2920 }
2921
2922 /* Commit SS. */
2923 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2924 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2925 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2926 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2927 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2928 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2929 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2930
2931 /* CPL has changed, update IEM before loading rest of segments. */
2932 IEM_SET_CPL(pVCpu, uNewCpl);
2933
2934 /*
2935 * Load the data segments for the new task.
2936 */
2937 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2938 if (rcStrict != VINF_SUCCESS)
2939 return rcStrict;
2940 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2941 if (rcStrict != VINF_SUCCESS)
2942 return rcStrict;
2943 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2944 if (rcStrict != VINF_SUCCESS)
2945 return rcStrict;
2946 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2947 if (rcStrict != VINF_SUCCESS)
2948 return rcStrict;
2949
2950 /*
2951 * Load the code segment for the new task.
2952 */
2953 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2954 {
2955 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2956 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2957 }
2958
2959 /* Fetch the descriptor. */
2960 IEMSELDESC DescCS;
2961 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2962 if (rcStrict != VINF_SUCCESS)
2963 {
2964 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2965 return rcStrict;
2966 }
2967
2968 /* CS must be a code segment. */
2969 if ( !DescCS.Legacy.Gen.u1DescType
2970 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2971 {
2972 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2973 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2974 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2975 }
2976
2977 /* For conforming CS, DPL must be less than or equal to the RPL. */
2978 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2979 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2980 {
2981 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2982 DescCS.Legacy.Gen.u2Dpl));
2983 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2984 }
2985
2986 /* For non-conforming CS, DPL must match RPL. */
2987 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2988 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2989 {
2990 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2991 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2992 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2993 }
2994
2995 /* Is it there? */
2996 if (!DescCS.Legacy.Gen.u1Present)
2997 {
2998 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2999 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3000 }
3001
3002 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3003 u64Base = X86DESC_BASE(&DescCS.Legacy);
3004
3005 /* Set the accessed bit before committing the result into CS. */
3006 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3007 {
3008 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3009 if (rcStrict != VINF_SUCCESS)
3010 return rcStrict;
3011 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3012 }
3013
3014 /* Commit CS. */
3015 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3016 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3017 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3018 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3019 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3020 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3022 }
3023
3024 /* Make sure the CPU mode is correct. */
3025 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3026 if (fExecNew != pVCpu->iem.s.fExec)
3027 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3028 pVCpu->iem.s.fExec = fExecNew;
3029
3030 /** @todo Debug trap. */
3031 if (fIsNewTss386 && fNewDebugTrap)
3032 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3033
3034 /*
3035 * Construct the error code masks based on what caused this task switch.
3036 * See Intel Instruction reference for INT.
3037 */
3038 uint16_t uExt;
3039 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3040 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3041 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3042 uExt = 1;
3043 else
3044 uExt = 0;
3045
3046 /*
3047 * Push any error code on to the new stack.
3048 */
3049 if (fFlags & IEM_XCPT_FLAGS_ERR)
3050 {
3051 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3052 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3053 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3054
3055 /* Check that there is sufficient space on the stack. */
3056        /** @todo Factor out segment limit checking for normal/expand down segments
3057         *        into a separate function (an illustrative sketch follows this function). */
3058 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3059 {
3060 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3061 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3062 {
3063 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3064 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3065 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3066 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3067 }
3068 }
3069 else
3070 {
3071 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3072 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3073 {
3074 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3075 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3076 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3077 }
3078 }
3079
3080
3081 if (fIsNewTss386)
3082 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3083 else
3084 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3085 if (rcStrict != VINF_SUCCESS)
3086 {
3087 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3088 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3089 return rcStrict;
3090 }
3091 }
3092
3093 /* Check the new EIP against the new CS limit. */
3094 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3095 {
3096        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3097 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3098 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3099 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3100 }
3101
3102 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3103 pVCpu->cpum.GstCtx.ss.Sel));
3104 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3105}
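
/*
 * Illustrative sketch only (kept out of the build): the normal vs. expand-down stack
 * limit check used in iemTaskSwitch above, factored out along the lines of the @todo
 * there.  The helper name and signature are hypothetical and not part of IEM.
 */
#if 0
static bool iemSketchStackFrameFitsLimit(uint32_t uEsp, uint32_t cbFrame, uint32_t cbLimit,
                                         bool fExpandDown, bool fDefBig)
{
    if (!fExpandDown)
        return uEsp - 1 <= cbLimit          /* highest byte of the frame is inside the segment */
            && uEsp     >= cbFrame;         /* and there is room for the whole frame below ESP */
    /* Expand-down: valid offsets are (cbLimit, 64K-1 or 4G-1]. */
    return uEsp - 1       <= (fDefBig ? UINT32_MAX : UINT32_C(0xffff))
        && uEsp - cbFrame >= cbLimit + UINT32_C(1);
}
#endif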
3106
3107
3108/**
3109 * Implements exceptions and interrupts for protected mode.
3110 *
3111 * @returns VBox strict status code.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 * @param cbInstr The number of bytes to offset rIP by in the return
3114 * address.
3115 * @param u8Vector The interrupt / exception vector number.
3116 * @param fFlags The flags.
3117 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3118 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3119 */
3120static VBOXSTRICTRC
3121iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3122 uint8_t cbInstr,
3123 uint8_t u8Vector,
3124 uint32_t fFlags,
3125 uint16_t uErr,
3126 uint64_t uCr2) RT_NOEXCEPT
3127{
3128 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3129
3130 /*
3131 * Read the IDT entry.
3132 */
3133 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3134 {
3135 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3136 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3137 }
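    /* Note: IDT related error codes have bit 1 (IDT) set and the vector number in the
       selector index field (bits 3 thru 15), as composed in the checks above and below. */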
3138 X86DESC Idte;
3139 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3140 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3141 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3142 {
3143 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3144 return rcStrict;
3145 }
3146 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3147 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3148 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3149 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3150
3151 /*
3152 * Check the descriptor type, DPL and such.
3153 * ASSUMES this is done in the same order as described for call-gate calls.
3154 */
3155 if (Idte.Gate.u1DescType)
3156 {
3157 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3158 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3159 }
3160 bool fTaskGate = false;
3161 uint8_t f32BitGate = true;
3162 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3163 switch (Idte.Gate.u4Type)
3164 {
3165 case X86_SEL_TYPE_SYS_UNDEFINED:
3166 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3167 case X86_SEL_TYPE_SYS_LDT:
3168 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3169 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3170 case X86_SEL_TYPE_SYS_UNDEFINED2:
3171 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3172 case X86_SEL_TYPE_SYS_UNDEFINED3:
3173 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3174 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3175 case X86_SEL_TYPE_SYS_UNDEFINED4:
3176 {
3177 /** @todo check what actually happens when the type is wrong...
3178 * esp. call gates. */
3179 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3180 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3181 }
3182
3183 case X86_SEL_TYPE_SYS_286_INT_GATE:
3184 f32BitGate = false;
3185 RT_FALL_THRU();
3186 case X86_SEL_TYPE_SYS_386_INT_GATE:
3187 fEflToClear |= X86_EFL_IF;
3188 break;
3189
3190 case X86_SEL_TYPE_SYS_TASK_GATE:
3191 fTaskGate = true;
3192#ifndef IEM_IMPLEMENTS_TASKSWITCH
3193 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3194#endif
3195 break;
3196
3197 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3198 f32BitGate = false;
3199 break;
3200 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3201 break;
3202
3203 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3204 }
3205
3206 /* Check DPL against CPL if applicable. */
3207 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3208 {
3209 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3210 {
3211 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3212 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3213 }
3214 }
3215
3216 /* Is it there? */
3217 if (!Idte.Gate.u1Present)
3218 {
3219 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3220 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3221 }
3222
3223 /* Is it a task-gate? */
3224 if (fTaskGate)
3225 {
3226 /*
3227 * Construct the error code masks based on what caused this task switch.
3228 * See Intel Instruction reference for INT.
3229 */
3230 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3231 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3232 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3233 RTSEL SelTss = Idte.Gate.u16Sel;
3234
3235 /*
3236 * Fetch the TSS descriptor in the GDT.
3237 */
3238 IEMSELDESC DescTSS;
3239 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3240 if (rcStrict != VINF_SUCCESS)
3241 {
3242 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3243 VBOXSTRICTRC_VAL(rcStrict)));
3244 return rcStrict;
3245 }
3246
3247 /* The TSS descriptor must be a system segment and be available (not busy). */
3248 if ( DescTSS.Legacy.Gen.u1DescType
3249 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3250 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3251 {
3252 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3253 u8Vector, SelTss, DescTSS.Legacy.au64));
3254 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3255 }
3256
3257 /* The TSS must be present. */
3258 if (!DescTSS.Legacy.Gen.u1Present)
3259 {
3260 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3261 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3262 }
3263
3264 /* Do the actual task switch. */
3265 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3266 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3267 fFlags, uErr, uCr2, SelTss, &DescTSS);
3268 }
3269
3270 /* A null CS is bad. */
3271 RTSEL NewCS = Idte.Gate.u16Sel;
3272 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3273 {
3274 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3275 return iemRaiseGeneralProtectionFault0(pVCpu);
3276 }
3277
3278 /* Fetch the descriptor for the new CS. */
3279 IEMSELDESC DescCS;
3280 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3281 if (rcStrict != VINF_SUCCESS)
3282 {
3283 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3284 return rcStrict;
3285 }
3286
3287 /* Must be a code segment. */
3288 if (!DescCS.Legacy.Gen.u1DescType)
3289 {
3290 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3291 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3292 }
3293 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3294 {
3295 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3296 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3297 }
3298
3299 /* Don't allow lowering the privilege level. */
3300 /** @todo Does the lowering of privileges apply to software interrupts
3301 * only? This has bearings on the more-privileged or
3302 * same-privilege stack behavior further down. A testcase would
3303 * be nice. */
3304 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3305 {
3306 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3307 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3308 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3309 }
3310
3311 /* Make sure the selector is present. */
3312 if (!DescCS.Legacy.Gen.u1Present)
3313 {
3314 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3315 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3316 }
3317
3318#ifdef LOG_ENABLED
3319    /* If software interrupt, try to decode it if logging is enabled and such. */
3320 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3321 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3322 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3323#endif
3324
3325 /* Check the new EIP against the new CS limit. */
3326 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3327 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3328 ? Idte.Gate.u16OffsetLow
3329 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
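    /* (286 gates only supply a 16-bit offset; 386 gates combine the low and high offset words.) */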
3330 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3331 if (uNewEip > cbLimitCS)
3332 {
3333 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3334 u8Vector, uNewEip, cbLimitCS, NewCS));
3335 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3336 }
3337 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3338
3339 /* Calc the flag image to push. */
3340 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3341 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3342 fEfl &= ~X86_EFL_RF;
3343 else
3344 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3345
3346 /* From V8086 mode only go to CPL 0. */
3347 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3348 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3349 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3350 {
3351 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3352 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3353 }
3354
3355 /*
3356 * If the privilege level changes, we need to get a new stack from the TSS.
3357     * This in turn means validating the new SS and ESP...
3358 */
3359 if (uNewCpl != IEM_GET_CPL(pVCpu))
3360 {
3361 RTSEL NewSS;
3362 uint32_t uNewEsp;
3363 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3364 if (rcStrict != VINF_SUCCESS)
3365 return rcStrict;
3366
3367 IEMSELDESC DescSS;
3368 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3369 if (rcStrict != VINF_SUCCESS)
3370 return rcStrict;
3371 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3372 if (!DescSS.Legacy.Gen.u1DefBig)
3373 {
3374 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3375 uNewEsp = (uint16_t)uNewEsp;
3376 }
3377
3378 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3379
3380 /* Check that there is sufficient space for the stack frame. */
3381 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3382 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3383 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3384 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
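        /* That is 5 or 6 entries (EIP, CS, EFLAGS, ESP, SS and the optional error code), or
           9/10 entries when interrupting V8086 code (ES, DS, FS and GS are pushed as well),
           each entry being 2 or 4 bytes depending on the gate size. */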
3385
3386 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3387 {
3388 if ( uNewEsp - 1 > cbLimitSS
3389 || uNewEsp < cbStackFrame)
3390 {
3391 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3392 u8Vector, NewSS, uNewEsp, cbStackFrame));
3393 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3394 }
3395 }
3396 else
3397 {
3398 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3399 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3400 {
3401 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3402 u8Vector, NewSS, uNewEsp, cbStackFrame));
3403 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3404 }
3405 }
3406
3407 /*
3408 * Start making changes.
3409 */
3410
3411 /* Set the new CPL so that stack accesses use it. */
3412 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3413 IEM_SET_CPL(pVCpu, uNewCpl);
3414
3415 /* Create the stack frame. */
3416 uint8_t bUnmapInfoStackFrame;
3417 RTPTRUNION uStackFrame;
3418 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3419 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3420 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3421 if (rcStrict != VINF_SUCCESS)
3422 return rcStrict;
3423 if (f32BitGate)
3424 {
3425 if (fFlags & IEM_XCPT_FLAGS_ERR)
3426 *uStackFrame.pu32++ = uErr;
3427 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3428 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3429 uStackFrame.pu32[2] = fEfl;
3430 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3431 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3432 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3433 if (fEfl & X86_EFL_VM)
3434 {
3435 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3436 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3437 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3438 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3439 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3440 }
3441 }
3442 else
3443 {
3444 if (fFlags & IEM_XCPT_FLAGS_ERR)
3445 *uStackFrame.pu16++ = uErr;
3446 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3447 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3448 uStackFrame.pu16[2] = fEfl;
3449 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3450 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3451 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3452 if (fEfl & X86_EFL_VM)
3453 {
3454 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3455 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3456 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3457 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3458 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3459 }
3460 }
3461 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3462 if (rcStrict != VINF_SUCCESS)
3463 return rcStrict;
3464
3465 /* Mark the selectors 'accessed' (hope this is the correct time). */
3466        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3467 * after pushing the stack frame? (Write protect the gdt + stack to
3468 * find out.) */
3469 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3470 {
3471 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3472 if (rcStrict != VINF_SUCCESS)
3473 return rcStrict;
3474 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3475 }
3476
3477 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3478 {
3479 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3480 if (rcStrict != VINF_SUCCESS)
3481 return rcStrict;
3482 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3483 }
3484
3485 /*
3486         * Start committing the register changes (joins with the DPL=CPL branch).
3487 */
3488 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3489 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3490 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3491 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3492 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3493 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3494 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3495 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3496 * SP is loaded).
3497 * Need to check the other combinations too:
3498 * - 16-bit TSS, 32-bit handler
3499 * - 32-bit TSS, 16-bit handler */
3500 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3501 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3502 else
3503 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3504
3505 if (fEfl & X86_EFL_VM)
3506 {
3507 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3508 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3509 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3510 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3511 }
3512 }
3513 /*
3514 * Same privilege, no stack change and smaller stack frame.
3515 */
3516 else
3517 {
3518 uint64_t uNewRsp;
3519 uint8_t bUnmapInfoStackFrame;
3520 RTPTRUNION uStackFrame;
3521 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3522 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3523 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3524 if (rcStrict != VINF_SUCCESS)
3525 return rcStrict;
3526
3527 if (f32BitGate)
3528 {
3529 if (fFlags & IEM_XCPT_FLAGS_ERR)
3530 *uStackFrame.pu32++ = uErr;
3531 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3532 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3533 uStackFrame.pu32[2] = fEfl;
3534 }
3535 else
3536 {
3537 if (fFlags & IEM_XCPT_FLAGS_ERR)
3538 *uStackFrame.pu16++ = uErr;
3539 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3540 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3541 uStackFrame.pu16[2] = fEfl;
3542 }
3543 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3544 if (rcStrict != VINF_SUCCESS)
3545 return rcStrict;
3546
3547 /* Mark the CS selector as 'accessed'. */
3548 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3549 {
3550 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3551 if (rcStrict != VINF_SUCCESS)
3552 return rcStrict;
3553 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3554 }
3555
3556 /*
3557 * Start committing the register changes (joins with the other branch).
3558 */
3559 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3560 }
3561
3562 /* ... register committing continues. */
3563 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3564 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3565 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3566 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3567 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3568 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3569
3570 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3571 fEfl &= ~fEflToClear;
3572 IEMMISC_SET_EFL(pVCpu, fEfl);
3573
3574 if (fFlags & IEM_XCPT_FLAGS_CR2)
3575 pVCpu->cpum.GstCtx.cr2 = uCr2;
3576
3577 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3578 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3579
3580 /* Make sure the execution flags are correct. */
3581 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3582 if (fExecNew != pVCpu->iem.s.fExec)
3583 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3584 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3585 pVCpu->iem.s.fExec = fExecNew;
3586 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3587
3588 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3589}
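
/*
 * Illustrative sketch only (kept out of the build): the 32-bit gate stack frame laid out
 * by iemRaiseXcptOrIntInProtMode above, lowest address first.  The error code, when
 * present, ends up below EIP; the outer ESP/SS pair is only pushed on a privilege change,
 * and the four data selectors only when interrupting V8086 code.  The type name is
 * hypothetical and not part of IEM.
 */
#if 0
typedef struct IEMSKETCHPROTFRAME32
{
    uint32_t uErrCd;                /* optional - only with IEM_XCPT_FLAGS_ERR */
    uint32_t uEip;                  /* return EIP */
    uint32_t uCs;                   /* old CS selector */
    uint32_t fEfl;                  /* EFLAGS image */
    uint32_t uEsp;                  /* old ESP - privilege change only */
    uint32_t uSs;                   /* old SS  - privilege change only */
    uint32_t uEs, uDs, uFs, uGs;    /* V8086 mode only */
} IEMSKETCHPROTFRAME32;
#endif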
3590
3591
3592/**
3593 * Implements exceptions and interrupts for long mode.
3594 *
3595 * @returns VBox strict status code.
3596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3597 * @param cbInstr The number of bytes to offset rIP by in the return
3598 * address.
3599 * @param u8Vector The interrupt / exception vector number.
3600 * @param fFlags The flags.
3601 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3602 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3603 */
3604static VBOXSTRICTRC
3605iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3606 uint8_t cbInstr,
3607 uint8_t u8Vector,
3608 uint32_t fFlags,
3609 uint16_t uErr,
3610 uint64_t uCr2) RT_NOEXCEPT
3611{
3612 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3613
3614 /*
3615 * Read the IDT entry.
3616 */
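    /* Each long mode IDT entry is 16 bytes (two quadwords), hence the shift by 4 below. */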
3617 uint16_t offIdt = (uint16_t)u8Vector << 4;
3618 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3619 {
3620 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3621 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3622 }
3623 X86DESC64 Idte;
3624#ifdef _MSC_VER /* Shut up silly compiler warning. */
3625 Idte.au64[0] = 0;
3626 Idte.au64[1] = 0;
3627#endif
3628 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3629 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3630 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3631 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3632 {
3633 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3634 return rcStrict;
3635 }
3636 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3637 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3638 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3639
3640 /*
3641 * Check the descriptor type, DPL and such.
3642 * ASSUMES this is done in the same order as described for call-gate calls.
3643 */
3644 if (Idte.Gate.u1DescType)
3645 {
3646 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3647 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3648 }
3649 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3650 switch (Idte.Gate.u4Type)
3651 {
3652 case AMD64_SEL_TYPE_SYS_INT_GATE:
3653 fEflToClear |= X86_EFL_IF;
3654 break;
3655 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3656 break;
3657
3658 default:
3659 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3660 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3661 }
3662
3663 /* Check DPL against CPL if applicable. */
3664 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3665 {
3666 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3667 {
3668 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3669 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3670 }
3671 }
3672
3673 /* Is it there? */
3674 if (!Idte.Gate.u1Present)
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3677 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3678 }
3679
3680 /* A null CS is bad. */
3681 RTSEL NewCS = Idte.Gate.u16Sel;
3682 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3683 {
3684 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3685 return iemRaiseGeneralProtectionFault0(pVCpu);
3686 }
3687
3688 /* Fetch the descriptor for the new CS. */
3689 IEMSELDESC DescCS;
3690 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3691 if (rcStrict != VINF_SUCCESS)
3692 {
3693 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3694 return rcStrict;
3695 }
3696
3697 /* Must be a 64-bit code segment. */
3698 if (!DescCS.Long.Gen.u1DescType)
3699 {
3700 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3701 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3702 }
3703 if ( !DescCS.Long.Gen.u1Long
3704 || DescCS.Long.Gen.u1DefBig
3705 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3706 {
3707 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3708 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3709 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3710 }
3711
3712 /* Don't allow lowering the privilege level. For non-conforming CS
3713 selectors, the CS.DPL sets the privilege level the trap/interrupt
3714 handler runs at. For conforming CS selectors, the CPL remains
3715 unchanged, but the CS.DPL must be <= CPL. */
3716 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3717 * when CPU in Ring-0. Result \#GP? */
3718 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3719 {
3720 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3721 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3722 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3723 }
3724
3725
3726 /* Make sure the selector is present. */
3727 if (!DescCS.Legacy.Gen.u1Present)
3728 {
3729 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3730 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3731 }
3732
3733 /* Check that the new RIP is canonical. */
3734 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3735 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3736 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3737 if (!IEM_IS_CANONICAL(uNewRip))
3738 {
3739 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3740 return iemRaiseGeneralProtectionFault0(pVCpu);
3741 }
3742
3743 /*
3744 * If the privilege level changes or if the IST isn't zero, we need to get
3745 * a new stack from the TSS.
3746 */
3747 uint64_t uNewRsp;
3748 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3749 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3750 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3751 || Idte.Gate.u3IST != 0)
3752 {
3753 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3754 if (rcStrict != VINF_SUCCESS)
3755 return rcStrict;
3756 }
3757 else
3758 uNewRsp = pVCpu->cpum.GstCtx.rsp;
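    /* The CPU aligns the stack on a 16-byte boundary before pushing the frame in long mode. */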
3759 uNewRsp &= ~(uint64_t)0xf;
3760
3761 /*
3762 * Calc the flag image to push.
3763 */
3764 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3765 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3766 fEfl &= ~X86_EFL_RF;
3767 else
3768 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3769
3770 /*
3771 * Start making changes.
3772 */
3773 /* Set the new CPL so that stack accesses use it. */
3774 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3775 IEM_SET_CPL(pVCpu, uNewCpl);
3776/** @todo Setting CPL this early seems wrong as it would affect any errors we
3777 * raise accessing the stack and (?) GDT/LDT... */
3778
3779 /* Create the stack frame. */
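    /* The frame is five quadwords - SS, RSP, RFLAGS, CS and RIP - plus an optional error code. */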
3780 uint8_t bUnmapInfoStackFrame;
3781 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3782 RTPTRUNION uStackFrame;
3783 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3784 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787
3788 if (fFlags & IEM_XCPT_FLAGS_ERR)
3789 *uStackFrame.pu64++ = uErr;
3790 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3791 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3792 uStackFrame.pu64[2] = fEfl;
3793 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3794 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3795 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3796 if (rcStrict != VINF_SUCCESS)
3797 return rcStrict;
3798
3799    /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3800    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3801 * after pushing the stack frame? (Write protect the gdt + stack to
3802 * find out.) */
3803 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3804 {
3805 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3806 if (rcStrict != VINF_SUCCESS)
3807 return rcStrict;
3808 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3809 }
3810
3811 /*
3812     * Start committing the register changes.
3813 */
3814    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3815 * hidden registers when interrupting 32-bit or 16-bit code! */
3816 if (uNewCpl != uOldCpl)
3817 {
3818 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3819 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3820 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3821 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3822 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3823 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3824 }
3825 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3826 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3827 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3828 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3829 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3830 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3831 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3832 pVCpu->cpum.GstCtx.rip = uNewRip;
3833
3834 fEfl &= ~fEflToClear;
3835 IEMMISC_SET_EFL(pVCpu, fEfl);
3836
3837 if (fFlags & IEM_XCPT_FLAGS_CR2)
3838 pVCpu->cpum.GstCtx.cr2 = uCr2;
3839
3840 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3841 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3842
3843 iemRecalcExecModeAndCplFlags(pVCpu);
3844
3845 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3846}
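
/*
 * Illustrative sketch only (kept out of the build): how the 64-bit gate target used by
 * iemRaiseXcptOrIntInLongMode above is assembled from the three offset fields of the
 * 16-byte IDT entry.  The helper name is hypothetical and not part of IEM.
 */
#if 0
static uint64_t iemSketchLongGateOffset(X86DESC64 const *pIdte)
{
    return (uint64_t)pIdte->Gate.u16OffsetLow
         | ((uint64_t)pIdte->Gate.u16OffsetHigh << 16)
         | ((uint64_t)pIdte->Gate.u32OffsetTop  << 32);
}
#endif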
3847
3848
3849/**
3850 * Implements exceptions and interrupts.
3851 *
3852 * All exceptions and interrupts go through this function!
3853 *
3854 * @returns VBox strict status code.
3855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3856 * @param cbInstr The number of bytes to offset rIP by in the return
3857 * address.
3858 * @param u8Vector The interrupt / exception vector number.
3859 * @param fFlags The flags.
3860 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3861 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3862 */
3863VBOXSTRICTRC
3864iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3865 uint8_t cbInstr,
3866 uint8_t u8Vector,
3867 uint32_t fFlags,
3868 uint16_t uErr,
3869 uint64_t uCr2) RT_NOEXCEPT
3870{
3871 /*
3872 * Get all the state that we might need here.
3873 */
3874 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3875 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3876
3877#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3878 /*
3879 * Flush prefetch buffer
3880 */
3881 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3882#endif
3883
3884 /*
3885 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3886 */
3887 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3888 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3889 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3890 | IEM_XCPT_FLAGS_BP_INSTR
3891 | IEM_XCPT_FLAGS_ICEBP_INSTR
3892 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3893 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3894 {
3895 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3896 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3897 u8Vector = X86_XCPT_GP;
3898 uErr = 0;
3899 }
3900
3901 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3902#ifdef DBGFTRACE_ENABLED
3903 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3904 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3905 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3906#endif
3907
3908 /*
3909 * Check if DBGF wants to intercept the exception.
3910 */
3911 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3912 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3913 { /* likely */ }
3914 else
3915 {
3916 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3917 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3918 if (rcStrict != VINF_SUCCESS)
3919 return rcStrict;
3920 }
3921
3922 /*
3923 * Evaluate whether NMI blocking should be in effect.
3924 * Normally, NMI blocking is in effect whenever we inject an NMI.
3925 */
3926 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3927 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3928
3929#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3930 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3931 {
3932 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3933 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3934 return rcStrict0;
3935
3936 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3937 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3938 {
3939 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3940 fBlockNmi = false;
3941 }
3942 }
3943#endif
3944
3945#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3946 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3947 {
3948 /*
3949 * If the event is being injected as part of VMRUN, it isn't subject to event
3950 * intercepts in the nested-guest. However, secondary exceptions that occur
3951 * during injection of any event -are- subject to exception intercepts.
3952 *
3953 * See AMD spec. 15.20 "Event Injection".
3954 */
3955 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3956 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3957 else
3958 {
3959 /*
3960 * Check and handle if the event being raised is intercepted.
3961 */
3962 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3963 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3964 return rcStrict0;
3965 }
3966 }
3967#endif
3968
3969 /*
3970 * Set NMI blocking if necessary.
3971 */
3972 if (fBlockNmi)
3973 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3974
3975 /*
3976 * Do recursion accounting.
3977 */
3978 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3979 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3980 if (pVCpu->iem.s.cXcptRecursions == 0)
3981 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3982 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3983 else
3984 {
3985 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3986 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3987 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3988
3989 if (pVCpu->iem.s.cXcptRecursions >= 4)
3990 {
3991#ifdef DEBUG_bird
3992 AssertFailed();
3993#endif
3994 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3995 }
3996
3997 /*
3998 * Evaluate the sequence of recurring events.
3999 */
4000 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4001 NULL /* pXcptRaiseInfo */);
4002 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4003 { /* likely */ }
4004 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4005 {
4006 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4007 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4008 u8Vector = X86_XCPT_DF;
4009 uErr = 0;
4010#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4011 /* VMX nested-guest #DF intercept needs to be checked here. */
4012 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4013 {
4014 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4015 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4016 return rcStrict0;
4017 }
4018#endif
4019 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4020 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4021 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4022 }
4023 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4024 {
4025 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4026 return iemInitiateCpuShutdown(pVCpu);
4027 }
4028 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4029 {
4030 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4031 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4032 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4033 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4034 return VERR_EM_GUEST_CPU_HANG;
4035 }
4036 else
4037 {
4038 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4039 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4040 return VERR_IEM_IPE_9;
4041 }
4042
4043 /*
4044             * The 'EXT' bit is set when an exception occurs during delivery of an external
4045             * event (such as an interrupt or an earlier exception)[1]. The privileged software
4046             * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by software
4047             * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
4048 *
4049 * [1] - Intel spec. 6.13 "Error Code"
4050 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4051 * [3] - Intel Instruction reference for INT n.
4052 */
4053 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4054 && (fFlags & IEM_XCPT_FLAGS_ERR)
4055 && u8Vector != X86_XCPT_PF
4056 && u8Vector != X86_XCPT_DF)
4057 {
4058 uErr |= X86_TRAP_ERR_EXTERNAL;
4059 }
4060 }
4061
4062 pVCpu->iem.s.cXcptRecursions++;
4063 pVCpu->iem.s.uCurXcpt = u8Vector;
4064 pVCpu->iem.s.fCurXcpt = fFlags;
4065 pVCpu->iem.s.uCurXcptErr = uErr;
4066 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4067
4068 /*
4069 * Extensive logging.
4070 */
4071#if defined(LOG_ENABLED) && defined(IN_RING3)
4072 if (LogIs3Enabled())
4073 {
4074 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4075 char szRegs[4096];
4076 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4077 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4078 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4079 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4080 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4081 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4082 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4083 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4084 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4085 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4086 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4087 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4088 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4089 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4090 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4091 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4092 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4093 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4094 " efer=%016VR{efer}\n"
4095 " pat=%016VR{pat}\n"
4096 " sf_mask=%016VR{sf_mask}\n"
4097 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4098 " lstar=%016VR{lstar}\n"
4099 " star=%016VR{star} cstar=%016VR{cstar}\n"
4100 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4101 );
4102
4103 char szInstr[256];
4104 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4105 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4106 szInstr, sizeof(szInstr), NULL);
4107 Log3(("%s%s\n", szRegs, szInstr));
4108 }
4109#endif /* LOG_ENABLED */
4110
4111 /*
4112 * Stats.
4113 */
4114 uint64_t const uTimestamp = ASMReadTSC();
4115 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4116 {
4117 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4118 EMHistoryAddExit(pVCpu,
4119 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4120 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4121 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4122 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4123 }
4124 else
4125 {
4126 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4127 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4128 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4129 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4130 if (fFlags & IEM_XCPT_FLAGS_ERR)
4131 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4132 if (fFlags & IEM_XCPT_FLAGS_CR2)
4133 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4134 }
4135
4136 /*
4137     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4138     * to ensure that a stale TLB or paging-structure cache entry will only cause one
4139 * spurious #PF.
4140 */
4141 if ( u8Vector == X86_XCPT_PF
4142 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4143 IEMTlbInvalidatePage(pVCpu, uCr2);
4144
4145 /*
4146 * Call the mode specific worker function.
4147 */
4148 VBOXSTRICTRC rcStrict;
4149 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4150 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4151 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4152 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4153 else
4154 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4155
4156 /* Flush the prefetch buffer. */
4157 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4158
4159 /*
4160 * Unwind.
4161 */
4162 pVCpu->iem.s.cXcptRecursions--;
4163 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4164 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4165 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4166 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4167 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4168 return rcStrict;
4169}
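
/*
 * Illustrative sketch only (kept out of the build): the EXT rule applied in several
 * places above - error codes caused by externally delivered events (hardware interrupts,
 * prior CPU exceptions, ICEBP) get bit 0 set, while those caused by INT n, INT3 and INTO
 * do not.  The helper name is hypothetical and not part of IEM.
 */
#if 0
static uint16_t iemSketchApplyExtBit(uint16_t uErrCd, uint32_t fXcptFlags)
{
    bool const fSoftInt = (fXcptFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
                       && !(fXcptFlags & IEM_XCPT_FLAGS_ICEBP_INSTR);
    return fSoftInt ? uErrCd : (uint16_t)(uErrCd | X86_TRAP_ERR_EXTERNAL);
}
#endif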
4170
4171#ifdef IEM_WITH_SETJMP
4172/**
4173 * See iemRaiseXcptOrInt. Will not return.
4174 */
4175DECL_NO_RETURN(void)
4176iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4177 uint8_t cbInstr,
4178 uint8_t u8Vector,
4179 uint32_t fFlags,
4180 uint16_t uErr,
4181 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4182{
4183 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4184 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4185}
4186#endif
4187
4188
4189/** \#DE - 00. */
4190VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4191{
4192 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4193}
4194
4195
4196/** \#DB - 01.
4197 * @note This automatically clears DR7.GD. */
4198VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4199{
4200 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4201 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4202 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4203}
4204
4205
4206/** \#BR - 05. */
4207VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4208{
4209 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4210}
4211
4212
4213/** \#UD - 06. */
4214VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4215{
4216 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4217}
4218
4219
4220#ifdef IEM_WITH_SETJMP
4221/** \#UD - 06. */
4222DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4223{
4224 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4225}
4226#endif
4227
4228
4229/** \#NM - 07. */
4230VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4231{
4232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4233}
4234
4235
4236#ifdef IEM_WITH_SETJMP
4237/** \#NM - 07. */
4238DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4239{
4240 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4241}
4242#endif
4243
4244
4245/** \#TS(err) - 0a. */
4246VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4247{
4248 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4249}
4250
4251
4252/** \#TS(tr) - 0a. */
4253VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4254{
4255 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4256 pVCpu->cpum.GstCtx.tr.Sel, 0);
4257}
4258
4259
4260/** \#TS(0) - 0a. */
4261VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4262{
4263 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4264 0, 0);
4265}
4266
4267
4268/** \#TS(err) - 0a. */
4269VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4270{
4271 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4272 uSel & X86_SEL_MASK_OFF_RPL, 0);
4273}
4274
4275
4276/** \#NP(err) - 0b. */
4277VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4278{
4279 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4280}
4281
4282
4283/** \#NP(sel) - 0b. */
4284VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4285{
4286 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4287 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4288 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4289 uSel & ~X86_SEL_RPL, 0);
4290}
4291
4292
4293/** \#SS(seg) - 0c. */
4294VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4295{
4296 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4297 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4298 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4299 uSel & ~X86_SEL_RPL, 0);
4300}
4301
4302
4303/** \#SS(err) - 0c. */
4304VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4305{
4306 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4307 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4308 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4309}
4310
4311
4312/** \#GP(n) - 0d. */
4313VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4314{
4315 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4316 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4317}
4318
4319
4320/** \#GP(0) - 0d. */
4321VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4322{
4323 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4324 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4325}
4326
4327#ifdef IEM_WITH_SETJMP
4328/** \#GP(0) - 0d. */
4329DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4330{
4331 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4332 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4333}
4334#endif
4335
4336
4337/** \#GP(sel) - 0d. */
4338VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4339{
4340 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4341 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4342 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4343 Sel & ~X86_SEL_RPL, 0);
4344}
4345
4346
4347/** \#GP(0) - 0d. */
4348VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4349{
4350 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4351 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4352}
4353
4354
4355/** \#GP(sel) - 0d. */
4356VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4357{
4358 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4359 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4360 NOREF(iSegReg); NOREF(fAccess);
4361 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4362 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4363}
4364
4365#ifdef IEM_WITH_SETJMP
4366/** \#GP(sel) - 0d, longjmp. */
4367DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4368{
4369 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4370 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4371 NOREF(iSegReg); NOREF(fAccess);
4372 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4373 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4374}
4375#endif
4376
4377/** \#GP(sel) - 0d. */
4378VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4379{
4380 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4381 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4382 NOREF(Sel);
4383 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4384}
4385
4386#ifdef IEM_WITH_SETJMP
4387/** \#GP(sel) - 0d, longjmp. */
4388DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4389{
4390 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4391 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4392 NOREF(Sel);
4393 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4394}
4395#endif
4396
4397
4398/** \#GP(sel) - 0d. */
4399VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4400{
4401 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4402 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4403 NOREF(iSegReg); NOREF(fAccess);
4404 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4405}
4406
4407#ifdef IEM_WITH_SETJMP
4408/** \#GP(sel) - 0d, longjmp. */
4409DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4410{
4411 NOREF(iSegReg); NOREF(fAccess);
4412 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4413}
4414#endif
4415
4416
4417/** \#PF(n) - 0e. */
4418VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4419{
4420 uint16_t uErr;
4421 switch (rc)
4422 {
4423 case VERR_PAGE_NOT_PRESENT:
4424 case VERR_PAGE_TABLE_NOT_PRESENT:
4425 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4426 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4427 uErr = 0;
4428 break;
4429
4430 default:
4431 AssertMsgFailed(("%Rrc\n", rc));
4432 RT_FALL_THRU();
4433 case VERR_ACCESS_DENIED:
4434 uErr = X86_TRAP_PF_P;
4435 break;
4436
4437 /** @todo reserved */
4438 }
4439
4440 if (IEM_GET_CPL(pVCpu) == 3)
4441 uErr |= X86_TRAP_PF_US;
4442
4443 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4444 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4445 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4446 uErr |= X86_TRAP_PF_ID;
4447
4448#if 0 /* This is so much non-sense, really. Why was it done like that? */
4449 /* Note! RW access callers reporting a WRITE protection fault, will clear
4450 the READ flag before calling. So, read-modify-write accesses (RW)
4451 can safely be reported as READ faults. */
4452 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4453 uErr |= X86_TRAP_PF_RW;
4454#else
4455 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4456 {
4457 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4458 /// (regardless of outcome of the comparison in the latter case).
4459 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4460 uErr |= X86_TRAP_PF_RW;
4461 }
4462#endif
4463
4464 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4465 of the memory operand rather than at the start of it. (Not sure what
4466 happens if it crosses a page boundary.) The current heuristic for
4467 this is to report the #PF for the last byte if the access is more than
4468 64 bytes. This is probably not correct, but we can work that out later,
4469 main objective now is to get FXSAVE to work like for real hardware and
4470 make bs3-cpu-basic2 work. */
4471 if (cbAccess <= 64)
4472 { /* likely */ }
4473 else
4474 GCPtrWhere += cbAccess - 1;
4475
4476 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4477 uErr, GCPtrWhere);
4478}
4479
4480#ifdef IEM_WITH_SETJMP
4481/** \#PF(n) - 0e, longjmp. */
4482DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4483 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4484{
4485 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4486}
4487#endif
4488
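/*
 * Illustrative sketch (kept disabled, not part of the build): how the #PF
 * error code assembled in iemRaisePageFault decomposes for the common cases.
 * The helper name is made up; the X86_TRAP_PF_* constants are the ones used
 * above.
 */
#if 0 /* illustrative only */
static uint16_t iemSketchPageFaultErrCode(bool fPresent, bool fUser, bool fWrite, bool fInstrFetchNx)
{
    uint16_t uErr = fPresent ? X86_TRAP_PF_P : 0;   /* protection violation vs. page not present */
    if (fUser)
        uErr |= X86_TRAP_PF_US;                     /* access made at CPL 3 */
    if (fWrite)
        uErr |= X86_TRAP_PF_RW;                     /* write access (incl. read-modify-write, see above) */
    if (fInstrFetchNx)
        uErr |= X86_TRAP_PF_ID;                     /* instruction fetch with CR4.PAE + EFER.NXE enabled */
    return uErr;
}
#endif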
4489
4490/** \#MF(0) - 10. */
4491VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4492{
4493 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4494 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4495
4496 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4497 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4498 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4499}
4500
4501#ifdef IEM_WITH_SETJMP
4502/** \#MF(0) - 10, longjmp. */
4503DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4504{
4505 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4506}
4507#endif
4508
4509
4510/** \#AC(0) - 11. */
4511VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4512{
4513 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4514}
4515
4516#ifdef IEM_WITH_SETJMP
4517/** \#AC(0) - 11, longjmp. */
4518DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4519{
4520 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4521}
4522#endif
4523
4524
4525/** \#XF(0)/\#XM(0) - 19. */
4526VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4527{
4528 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4529}
4530
4531
4532#ifdef IEM_WITH_SETJMP
4533/** \#XF(0)/\#XM(0) - 19, longjmp. */
4534DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4535{
4536 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4537}
4538#endif
4539
4540
4541/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4542IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4543{
4544 NOREF(cbInstr);
4545 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4546}
4547
4548
4549/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4550IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4551{
4552 NOREF(cbInstr);
4553 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4554}
4555
4556
4557/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4558IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4559{
4560 NOREF(cbInstr);
4561 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4562}
4563
4564
4565/** @} */
4566
4567/** @name Common opcode decoders.
4568 * @{
4569 */
4570//#include <iprt/mem.h>
4571
4572/**
4573 * Used to add extra details about a stub case.
4574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4575 */
4576void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4577{
4578#if defined(LOG_ENABLED) && defined(IN_RING3)
4579 PVM pVM = pVCpu->CTX_SUFF(pVM);
4580 char szRegs[4096];
4581 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4582 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4583 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4584 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4585 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4586 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4587 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4588 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4589 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4590 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4591 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4592 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4593 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4594 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4595 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4596 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4597 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4598 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4599 " efer=%016VR{efer}\n"
4600 " pat=%016VR{pat}\n"
4601 " sf_mask=%016VR{sf_mask}\n"
4602 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4603 " lstar=%016VR{lstar}\n"
4604 " star=%016VR{star} cstar=%016VR{cstar}\n"
4605 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4606 );
4607
4608 char szInstr[256];
4609 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4610 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4611 szInstr, sizeof(szInstr), NULL);
4612
4613 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4614#else
4615 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4616#endif
4617}
4618
4619/** @} */
4620
4621
4622
4623/** @name Register Access.
4624 * @{
4625 */
4626
4627/**
4628 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4629 *
4630 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4631 * segment limit.
4632 *
4633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4634 * @param cbInstr Instruction size.
4635 * @param offNextInstr The offset of the next instruction.
4636 * @param enmEffOpSize Effective operand size.
4637 */
4638VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4639 IEMMODE enmEffOpSize) RT_NOEXCEPT
4640{
4641 switch (enmEffOpSize)
4642 {
4643 case IEMMODE_16BIT:
4644 {
4645 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4646 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4647 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4648 pVCpu->cpum.GstCtx.rip = uNewIp;
4649 else
4650 return iemRaiseGeneralProtectionFault0(pVCpu);
4651 break;
4652 }
4653
4654 case IEMMODE_32BIT:
4655 {
4656 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4657 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4658
4659 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4660 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4661 pVCpu->cpum.GstCtx.rip = uNewEip;
4662 else
4663 return iemRaiseGeneralProtectionFault0(pVCpu);
4664 break;
4665 }
4666
4667 case IEMMODE_64BIT:
4668 {
4669 Assert(IEM_IS_64BIT_CODE(pVCpu));
4670
4671 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4672 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4673 pVCpu->cpum.GstCtx.rip = uNewRip;
4674 else
4675 return iemRaiseGeneralProtectionFault0(pVCpu);
4676 break;
4677 }
4678
4679 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4680 }
4681
4682#ifndef IEM_WITH_CODE_TLB
4683 /* Flush the prefetch buffer. */
4684 pVCpu->iem.s.cbOpcode = cbInstr;
4685#endif
4686
4687 /*
4688 * Clear RF and finish the instruction (maybe raise #DB).
4689 */
4690 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4691}
4692
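/*
 * Illustrative sketch (kept disabled, not part of the build): the 16-bit case
 * above relies on uint16_t arithmetic wrapping, so a short backward jump near
 * IP 0 lands at the top of the 64K range and is then caught by the CS limit
 * check.  All values below are made up.
 */
#if 0 /* illustrative only */
static void iemSketchJmpRel8Wrap(void)
{
    uint16_t const uIp          = 0x0002;   /* current IP */
    uint8_t  const cbInstr      = 2;        /* JMP rel8 */
    int8_t   const offNextInstr = -8;       /* jump backwards past 0 */
    uint16_t const uNewIp       = uIp + cbInstr + (int16_t)offNextInstr; /* wraps to 0xfffc */
    /* uNewIp is then compared against cs.u32Limit; #GP(0) if it is above the limit. */
    NOREF(uNewIp);
}
#endif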
4693
4694/**
4695 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4696 *
4697 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4698 * segment limit.
4699 *
4700 * @returns Strict VBox status code.
4701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4702 * @param cbInstr Instruction size.
4703 * @param offNextInstr The offset of the next instruction.
4704 */
4705VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4706{
4707 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4708
4709 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4710 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4711 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4712 pVCpu->cpum.GstCtx.rip = uNewIp;
4713 else
4714 return iemRaiseGeneralProtectionFault0(pVCpu);
4715
4716#ifndef IEM_WITH_CODE_TLB
4717 /* Flush the prefetch buffer. */
4718 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4719#endif
4720
4721 /*
4722 * Clear RF and finish the instruction (maybe raise #DB).
4723 */
4724 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4725}
4726
4727
4728/**
4729 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4730 *
4731 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4732 * segment limit.
4733 *
4734 * @returns Strict VBox status code.
4735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4736 * @param cbInstr Instruction size.
4737 * @param offNextInstr The offset of the next instruction.
4738 * @param enmEffOpSize Effective operand size.
4739 */
4740VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4741 IEMMODE enmEffOpSize) RT_NOEXCEPT
4742{
4743 if (enmEffOpSize == IEMMODE_32BIT)
4744 {
4745 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4746
4747 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4748 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4749 pVCpu->cpum.GstCtx.rip = uNewEip;
4750 else
4751 return iemRaiseGeneralProtectionFault0(pVCpu);
4752 }
4753 else
4754 {
4755 Assert(enmEffOpSize == IEMMODE_64BIT);
4756
4757 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4758 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4759 pVCpu->cpum.GstCtx.rip = uNewRip;
4760 else
4761 return iemRaiseGeneralProtectionFault0(pVCpu);
4762 }
4763
4764#ifndef IEM_WITH_CODE_TLB
4765 /* Flush the prefetch buffer. */
4766 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4767#endif
4768
4769 /*
4770 * Clear RF and finish the instruction (maybe raise #DB).
4771 */
4772 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4773}
4774
4775/** @} */
4776
4777
4778/** @name FPU access and helpers.
4779 *
4780 * @{
4781 */
4782
4783/**
4784 * Updates the x87.DS and FPUDP registers.
4785 *
4786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4787 * @param pFpuCtx The FPU context.
4788 * @param iEffSeg The effective segment register.
4789 * @param GCPtrEff The effective address relative to @a iEffSeg.
4790 */
4791DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4792{
4793 RTSEL sel;
4794 switch (iEffSeg)
4795 {
4796 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4797 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4798 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4799 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4800 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4801 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4802 default:
4803 AssertMsgFailed(("%d\n", iEffSeg));
4804 sel = pVCpu->cpum.GstCtx.ds.Sel;
4805 }
4806 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4807 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4808 {
4809 pFpuCtx->DS = 0;
4810 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4811 }
4812 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4813 {
4814 pFpuCtx->DS = sel;
4815 pFpuCtx->FPUDP = GCPtrEff;
4816 }
4817 else
4818 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4819}
4820
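/*
 * Illustrative sketch (kept disabled, not part of the build): in real and V86
 * mode the code above stores a linear-style address in FPUDP (offset + sel * 16)
 * and zeroes DS.  Example values are made up.
 */
#if 0 /* illustrative only */
static void iemSketchFpuDpRealMode(void)
{
    uint16_t const sel    = 0x1234;                             /* effective segment selector */
    uint32_t const offEff = 0x0010;                             /* effective offset */
    uint32_t const uFpuDp = offEff + ((uint32_t)sel << 4);      /* 0x12350 */
    NOREF(uFpuDp);
}
#endif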
4821
4822/**
4823 * Rotates the stack registers in the push direction.
4824 *
4825 * @param pFpuCtx The FPU context.
4826 * @remarks This is a complete waste of time, but fxsave stores the registers in
4827 * stack order.
4828 */
4829DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4830{
4831 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4832 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4833 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4834 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4835 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4836 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4837 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4838 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4839 pFpuCtx->aRegs[0].r80 = r80Tmp;
4840}
4841
4842
4843/**
4844 * Rotates the stack registers in the pop direction.
4845 *
4846 * @param pFpuCtx The FPU context.
4847 * @remarks This is a complete waste of time, but fxsave stores the registers in
4848 * stack order.
4849 */
4850DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4851{
4852 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4853 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4854 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4855 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4856 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4857 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4858 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4859 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4860 pFpuCtx->aRegs[7].r80 = r80Tmp;
4861}
4862
4863
4864/**
4865 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4866 * exception prevents it.
4867 *
4868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4869 * @param pResult The FPU operation result to push.
4870 * @param pFpuCtx The FPU context.
4871 */
4872static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4873{
4874 /* Update FSW and bail if there are pending exceptions afterwards. */
4875 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4876 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4877 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4878 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4879 {
4880 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4881 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4882 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4883 pFpuCtx->FSW = fFsw;
4884 return;
4885 }
4886
4887 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4888 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4889 {
4890 /* All is fine, push the actual value. */
4891 pFpuCtx->FTW |= RT_BIT(iNewTop);
4892 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4893 }
4894 else if (pFpuCtx->FCW & X86_FCW_IM)
4895 {
4896 /* Masked stack overflow, push QNaN. */
4897 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4898 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4899 }
4900 else
4901 {
4902 /* Raise stack overflow, don't push anything. */
4903 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4904 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4905 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4906 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4907 return;
4908 }
4909
4910 fFsw &= ~X86_FSW_TOP_MASK;
4911 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4912 pFpuCtx->FSW = fFsw;
4913
4914 iemFpuRotateStackPush(pFpuCtx);
4915 RT_NOREF(pVCpu);
4916}
4917
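/*
 * Illustrative sketch (kept disabled, not part of the build): a push
 * decrements TOP modulo 8, which the code above writes as "+ 7" followed by
 * masking with the unshifted three-bit TOP mask.
 */
#if 0 /* illustrative only */
static uint16_t iemSketchFpuTopAfterPush(uint16_t uFsw)
{
    uint16_t const iTop    = X86_FSW_TOP_GET(uFsw);
    uint16_t const iNewTop = (iTop + 7) & X86_FSW_TOP_SMASK;    /* same as (iTop + 8 - 1) % 8 */
    return iNewTop;
}
#endif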
4918
4919/**
4920 * Stores a result in a FPU register and updates the FSW and FTW.
4921 *
4922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4923 * @param pFpuCtx The FPU context.
4924 * @param pResult The result to store.
4925 * @param iStReg Which FPU register to store it in.
4926 */
4927static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4928{
4929 Assert(iStReg < 8);
4930 uint16_t fNewFsw = pFpuCtx->FSW;
4931 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4932 fNewFsw &= ~X86_FSW_C_MASK;
4933 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4934 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4935 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4936 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4937 pFpuCtx->FSW = fNewFsw;
4938 pFpuCtx->FTW |= RT_BIT(iReg);
4939 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4940 RT_NOREF(pVCpu);
4941}
4942
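/*
 * Illustrative sketch (kept disabled, not part of the build): ST(i) is
 * relative to TOP, so the physical register updated above is
 * (TOP + iStReg) mod 8, mirroring the iReg computation in
 * iemFpuStoreResultOnly.  The helper name is made up.
 */
#if 0 /* illustrative only */
static uint8_t iemSketchStRegToPhysReg(uint16_t uFsw, uint8_t iStReg)
{
    return (uint8_t)((X86_FSW_TOP_GET(uFsw) + iStReg) & X86_FSW_TOP_SMASK);
}
#endif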
4943
4944/**
4945 * Only updates the FPU status word (FSW) with the result of the current
4946 * instruction.
4947 *
4948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4949 * @param pFpuCtx The FPU context.
4950 * @param u16FSW The FSW output of the current instruction.
4951 */
4952static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4953{
4954 uint16_t fNewFsw = pFpuCtx->FSW;
4955 fNewFsw &= ~X86_FSW_C_MASK;
4956 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4957 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4958 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4959 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4960 pFpuCtx->FSW = fNewFsw;
4961 RT_NOREF(pVCpu);
4962}
4963
4964
4965/**
4966 * Pops one item off the FPU stack if no pending exception prevents it.
4967 *
4968 * @param pFpuCtx The FPU context.
4969 */
4970static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4971{
4972 /* Check pending exceptions. */
4973 uint16_t uFSW = pFpuCtx->FSW;
4974 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4975 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4976 return;
4977
4978 /* Advance TOP by one (pop). */
4979 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4980 uFSW &= ~X86_FSW_TOP_MASK;
4981 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4982 pFpuCtx->FSW = uFSW;
4983
4984 /* Mark the previous ST0 as empty. */
4985 iOldTop >>= X86_FSW_TOP_SHIFT;
4986 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4987
4988 /* Rotate the registers. */
4989 iemFpuRotateStackPop(pFpuCtx);
4990}
4991
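/*
 * Illustrative sketch (kept disabled, not part of the build): a pop advances
 * TOP by one modulo 8; the code above does this in place on the shifted FSW
 * field by adding 9 << TOP_SHIFT, which is congruent to +1 once the result is
 * masked with X86_FSW_TOP_MASK.
 */
#if 0 /* illustrative only */
static uint16_t iemSketchFpuTopAfterPop(uint16_t uFsw)
{
    uint16_t const iNewTop = (X86_FSW_TOP_GET(uFsw) + 1) & X86_FSW_TOP_SMASK;  /* what the +9 trick amounts to */
    return (uFsw & ~X86_FSW_TOP_MASK) | (iNewTop << X86_FSW_TOP_SHIFT);
}
#endif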
4992
4993/**
4994 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4995 *
4996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4997 * @param pResult The FPU operation result to push.
4998 * @param uFpuOpcode The FPU opcode value.
4999 */
5000void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5001{
5002 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5003 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5004 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5005}
5006
5007
5008/**
5009 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5010 * and sets FPUDP and FPUDS.
5011 *
5012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5013 * @param pResult The FPU operation result to push.
5014 * @param iEffSeg The effective segment register.
5015 * @param GCPtrEff The effective address relative to @a iEffSeg.
5016 * @param uFpuOpcode The FPU opcode value.
5017 */
5018void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5019 uint16_t uFpuOpcode) RT_NOEXCEPT
5020{
5021 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5022 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5023 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5024 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5025}
5026
5027
5028/**
5029 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5030 * unless a pending exception prevents it.
5031 *
5032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5033 * @param pResult The FPU operation result to store and push.
5034 * @param uFpuOpcode The FPU opcode value.
5035 */
5036void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5037{
5038 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5039 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5040
5041 /* Update FSW and bail if there are pending exceptions afterwards. */
5042 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5043 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5044 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5045 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5046 {
5047 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5048 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5049 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5050 pFpuCtx->FSW = fFsw;
5051 return;
5052 }
5053
5054 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5055 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5056 {
5057 /* All is fine, push the actual value. */
5058 pFpuCtx->FTW |= RT_BIT(iNewTop);
5059 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5060 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5061 }
5062 else if (pFpuCtx->FCW & X86_FCW_IM)
5063 {
5064 /* Masked stack overflow, push QNaN. */
5065 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5066 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5067 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5068 }
5069 else
5070 {
5071 /* Raise stack overflow, don't push anything. */
5072 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5073 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5074 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5075 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5076 return;
5077 }
5078
5079 fFsw &= ~X86_FSW_TOP_MASK;
5080 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5081 pFpuCtx->FSW = fFsw;
5082
5083 iemFpuRotateStackPush(pFpuCtx);
5084}
5085
5086
5087/**
5088 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5089 * FOP.
5090 *
5091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5092 * @param pResult The result to store.
5093 * @param iStReg Which FPU register to store it in.
5094 * @param uFpuOpcode The FPU opcode value.
5095 */
5096void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5097{
5098 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5099 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5100 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5101}
5102
5103
5104/**
5105 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5106 * FOP, and then pops the stack.
5107 *
5108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5109 * @param pResult The result to store.
5110 * @param iStReg Which FPU register to store it in.
5111 * @param uFpuOpcode The FPU opcode value.
5112 */
5113void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5114{
5115 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5116 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5117 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5118 iemFpuMaybePopOne(pFpuCtx);
5119}
5120
5121
5122/**
5123 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5124 * FPUDP, and FPUDS.
5125 *
5126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5127 * @param pResult The result to store.
5128 * @param iStReg Which FPU register to store it in.
5129 * @param iEffSeg The effective memory operand selector register.
5130 * @param GCPtrEff The effective memory operand offset.
5131 * @param uFpuOpcode The FPU opcode value.
5132 */
5133void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5134 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5135{
5136 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5137 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5138 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5139 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5140}
5141
5142
5143/**
5144 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5145 * FPUDP, and FPUDS, and then pops the stack.
5146 *
5147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5148 * @param pResult The result to store.
5149 * @param iStReg Which FPU register to store it in.
5150 * @param iEffSeg The effective memory operand selector register.
5151 * @param GCPtrEff The effective memory operand offset.
5152 * @param uFpuOpcode The FPU opcode value.
5153 */
5154void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5155 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5156{
5157 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5158 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5159 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5160 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5161 iemFpuMaybePopOne(pFpuCtx);
5162}
5163
5164
5165/**
5166 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5167 *
5168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5169 * @param uFpuOpcode The FPU opcode value.
5170 */
5171void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5172{
5173 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5174 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5175}
5176
5177
5178/**
5179 * Updates the FSW, FOP, FPUIP, and FPUCS.
5180 *
5181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5182 * @param u16FSW The FSW from the current instruction.
5183 * @param uFpuOpcode The FPU opcode value.
5184 */
5185void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5186{
5187 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5188 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5189 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5190}
5191
5192
5193/**
5194 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5195 *
5196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5197 * @param u16FSW The FSW from the current instruction.
5198 * @param uFpuOpcode The FPU opcode value.
5199 */
5200void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5201{
5202 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5203 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5204 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5205 iemFpuMaybePopOne(pFpuCtx);
5206}
5207
5208
5209/**
5210 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5211 *
5212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5213 * @param u16FSW The FSW from the current instruction.
5214 * @param iEffSeg The effective memory operand selector register.
5215 * @param GCPtrEff The effective memory operand offset.
5216 * @param uFpuOpcode The FPU opcode value.
5217 */
5218void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5219{
5220 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5221 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5222 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5223 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5224}
5225
5226
5227/**
5228 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5229 *
5230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5231 * @param u16FSW The FSW from the current instruction.
5232 * @param uFpuOpcode The FPU opcode value.
5233 */
5234void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5235{
5236 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5237 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5238 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5239 iemFpuMaybePopOne(pFpuCtx);
5240 iemFpuMaybePopOne(pFpuCtx);
5241}
5242
5243
5244/**
5245 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5246 *
5247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5248 * @param u16FSW The FSW from the current instruction.
5249 * @param iEffSeg The effective memory operand selector register.
5250 * @param GCPtrEff The effective memory operand offset.
5251 * @param uFpuOpcode The FPU opcode value.
5252 */
5253void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5254 uint16_t uFpuOpcode) RT_NOEXCEPT
5255{
5256 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5257 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5258 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5259 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5260 iemFpuMaybePopOne(pFpuCtx);
5261}
5262
5263
5264/**
5265 * Worker routine for raising an FPU stack underflow exception.
5266 *
5267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5268 * @param pFpuCtx The FPU context.
5269 * @param iStReg The stack register being accessed.
5270 */
5271static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5272{
5273 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5274 if (pFpuCtx->FCW & X86_FCW_IM)
5275 {
5276 /* Masked underflow. */
5277 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5278 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5279 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5280 if (iStReg != UINT8_MAX)
5281 {
5282 pFpuCtx->FTW |= RT_BIT(iReg);
5283 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5284 }
5285 }
5286 else
5287 {
5288 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5289 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5290 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5291 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5292 }
5293 RT_NOREF(pVCpu);
5294}
5295
5296
5297/**
5298 * Raises a FPU stack underflow exception.
5299 *
5300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5301 * @param iStReg The destination register that should be loaded
5302 * with QNaN if \#IS is not masked. Specify
5303 * UINT8_MAX if none (like for fcom).
5304 * @param uFpuOpcode The FPU opcode value.
5305 */
5306void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5307{
5308 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5309 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5310 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5311}
5312
5313
5314void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5315{
5316 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5317 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5318 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5319 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5320}
5321
5322
5323void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5324{
5325 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5326 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5327 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5328 iemFpuMaybePopOne(pFpuCtx);
5329}
5330
5331
5332void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5333 uint16_t uFpuOpcode) RT_NOEXCEPT
5334{
5335 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5336 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5337 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5338 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5339 iemFpuMaybePopOne(pFpuCtx);
5340}
5341
5342
5343void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5344{
5345 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5346 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5347 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5348 iemFpuMaybePopOne(pFpuCtx);
5349 iemFpuMaybePopOne(pFpuCtx);
5350}
5351
5352
5353void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5354{
5355 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5356 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5357
5358 if (pFpuCtx->FCW & X86_FCW_IM)
5359 {
5360 /* Masked underflow - Push QNaN. */
5361 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5362 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5363 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5364 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5365 pFpuCtx->FTW |= RT_BIT(iNewTop);
5366 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5367 iemFpuRotateStackPush(pFpuCtx);
5368 }
5369 else
5370 {
5371 /* Exception pending - don't change TOP or the register stack. */
5372 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5373 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5374 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5375 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5376 }
5377}
5378
5379
5380void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5381{
5382 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5383 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5384
5385 if (pFpuCtx->FCW & X86_FCW_IM)
5386 {
5387 /* Masked underflow - Push QNaN. */
5388 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5389 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5390 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5391 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5392 pFpuCtx->FTW |= RT_BIT(iNewTop);
5393 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5394 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5395 iemFpuRotateStackPush(pFpuCtx);
5396 }
5397 else
5398 {
5399 /* Exception pending - don't change TOP or the register stack. */
5400 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5401 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5402 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5403 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5404 }
5405}
5406
5407
5408/**
5409 * Worker routine for raising an FPU stack overflow exception on a push.
5410 *
5411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5412 * @param pFpuCtx The FPU context.
5413 */
5414static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5415{
5416 if (pFpuCtx->FCW & X86_FCW_IM)
5417 {
5418 /* Masked overflow. */
5419 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5420 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5421 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5422 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5423 pFpuCtx->FTW |= RT_BIT(iNewTop);
5424 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5425 iemFpuRotateStackPush(pFpuCtx);
5426 }
5427 else
5428 {
5429 /* Exception pending - don't change TOP or the register stack. */
5430 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5431 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5432 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5433 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5434 }
5435 RT_NOREF(pVCpu);
5436}
5437
5438
5439/**
5440 * Raises a FPU stack overflow exception on a push.
5441 *
5442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5443 * @param uFpuOpcode The FPU opcode value.
5444 */
5445void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5446{
5447 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5448 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5449 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5450}
5451
5452
5453/**
5454 * Raises a FPU stack overflow exception on a push with a memory operand.
5455 *
5456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5457 * @param iEffSeg The effective memory operand selector register.
5458 * @param GCPtrEff The effective memory operand offset.
5459 * @param uFpuOpcode The FPU opcode value.
5460 */
5461void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5462{
5463 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5464 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5465 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5466 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5467}
5468
5469/** @} */
5470
5471
5472/** @name SSE+AVX SIMD access and helpers.
5473 *
5474 * @{
5475 */
5476/**
5477 * Stores a result in a SIMD XMM register, updates the MXCSR.
5478 *
5479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5480 * @param pResult The result to store.
5481 * @param iXmmReg Which SIMD XMM register to store the result in.
5482 */
5483void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5484{
5485 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5486 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5487
5488 /* The result is only updated if there is no unmasked exception pending. */
5489 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5490 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5491 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5492}
5493
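/*
 * Illustrative sketch (kept disabled, not part of the build): the commit check
 * above shifts the six MXCSR exception mask bits down so they line up with the
 * six exception flag bits and requires every pending flag to be masked.
 */
#if 0 /* illustrative only */
static bool iemSketchSseResultCommittable(uint32_t fMxcsr)
{
    uint32_t const fUnmasked = ~((fMxcsr & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
                             &  (fMxcsr & X86_MXCSR_XCPT_FLAGS);
    return fUnmasked == 0;  /* true: no unmasked exception pending, the result gets stored */
}
#endif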
5494
5495/**
5496 * Updates the MXCSR.
5497 *
5498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5499 * @param fMxcsr The new MXCSR value.
5500 */
5501void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5502{
5503 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5504 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5505}
5506/** @} */
5507
5508
5509/** @name Memory access.
5510 *
5511 * @{
5512 */
5513
5514#undef LOG_GROUP
5515#define LOG_GROUP LOG_GROUP_IEM_MEM
5516
5517/**
5518 * Updates the IEMCPU::cbWritten counter if applicable.
5519 *
5520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5521 * @param fAccess The access being accounted for.
5522 * @param cbMem The access size.
5523 */
5524DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5525{
5526 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5527 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5528 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5529}
5530
5531
5532/**
5533 * Applies the segment limit, base and attributes.
5534 *
5535 * This may raise a \#GP or \#SS.
5536 *
5537 * @returns VBox strict status code.
5538 *
5539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5540 * @param fAccess The kind of access which is being performed.
5541 * @param iSegReg The index of the segment register to apply.
5542 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5543 * TSS, ++).
5544 * @param cbMem The access size.
5545 * @param pGCPtrMem Pointer to the guest memory address to apply
5546 * segmentation to. Input and output parameter.
5547 */
5548VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5549{
5550 if (iSegReg == UINT8_MAX)
5551 return VINF_SUCCESS;
5552
5553 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5554 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5555 switch (IEM_GET_CPU_MODE(pVCpu))
5556 {
5557 case IEMMODE_16BIT:
5558 case IEMMODE_32BIT:
5559 {
5560 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5561 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5562
5563 if ( pSel->Attr.n.u1Present
5564 && !pSel->Attr.n.u1Unusable)
5565 {
5566 Assert(pSel->Attr.n.u1DescType);
5567 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5568 {
5569 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5570 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5571 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5572
5573 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5574 {
5575 /** @todo CPL check. */
5576 }
5577
5578 /*
5579 * There are two kinds of data selectors, normal and expand down.
5580 */
5581 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5582 {
5583 if ( GCPtrFirst32 > pSel->u32Limit
5584 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5585 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5586 }
5587 else
5588 {
5589 /*
5590 * The upper boundary is defined by the B bit, not the G bit!
5591 */
5592 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5593 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5594 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5595 }
5596 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5597 }
5598 else
5599 {
5600 /*
5601 * A code selector can usually be used to read through; writing is
5602 * only permitted in real and V8086 mode.
5603 */
5604 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5605 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5606 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5607 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5608 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5609
5610 if ( GCPtrFirst32 > pSel->u32Limit
5611 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5612 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5613
5614 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5615 {
5616 /** @todo CPL check. */
5617 }
5618
5619 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5620 }
5621 }
5622 else
5623 return iemRaiseGeneralProtectionFault0(pVCpu);
5624 return VINF_SUCCESS;
5625 }
5626
5627 case IEMMODE_64BIT:
5628 {
5629 RTGCPTR GCPtrMem = *pGCPtrMem;
5630 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5631 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5632
5633 Assert(cbMem >= 1);
5634 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5635 return VINF_SUCCESS;
5636 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5637 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5638 return iemRaiseGeneralProtectionFault0(pVCpu);
5639 }
5640
5641 default:
5642 AssertFailedReturn(VERR_IEM_IPE_7);
5643 }
5644}
5645
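/*
 * Illustrative sketch (kept disabled, not part of the build): for an
 * expand-down data segment the valid offset range is (u32Limit, upper], where
 * the upper bound comes from the D/B bit, matching the check above.  The
 * helper name is made up.
 */
#if 0 /* illustrative only */
static bool iemSketchExpandDownInRange(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fDefBig)
{
    uint32_t const uUpper = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= uUpper;  /* both ends must fall inside the window */
}
#endif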
5646
5647/**
5648 * Translates a virtual address to a physical address and checks if we
5649 * can access the page as specified.
5650 *
5651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5652 * @param GCPtrMem The virtual address.
5653 * @param cbAccess The access size, for raising \#PF correctly for
5654 * FXSAVE and such.
5655 * @param fAccess The intended access.
5656 * @param pGCPhysMem Where to return the physical address.
5657 */
5658VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5659 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5660{
5661 /** @todo Need a different PGM interface here. We're currently using
5662 * generic / REM interfaces. this won't cut it for R0. */
5663 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5664 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5665 * here. */
5666 PGMPTWALK Walk;
5667 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5668 if (RT_FAILURE(rc))
5669 {
5670 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5671 /** @todo Check unassigned memory in unpaged mode. */
5672 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5673#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5674 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5675 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5676#endif
5677 *pGCPhysMem = NIL_RTGCPHYS;
5678 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5679 }
5680
5681 /* If the page is writable and does not have the no-exec bit set, all
5682 access is allowed. Otherwise we'll have to check more carefully... */
5683 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5684 {
5685 /* Write to read only memory? */
5686 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5687 && !(Walk.fEffective & X86_PTE_RW)
5688 && ( ( IEM_GET_CPL(pVCpu) == 3
5689 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5690 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5691 {
5692 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5693 *pGCPhysMem = NIL_RTGCPHYS;
5694#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5695 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5696 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5697#endif
5698 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5699 }
5700
5701 /* Kernel memory accessed by userland? */
5702 if ( !(Walk.fEffective & X86_PTE_US)
5703 && IEM_GET_CPL(pVCpu) == 3
5704 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5705 {
5706 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5707 *pGCPhysMem = NIL_RTGCPHYS;
5708#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5709 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5710 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5711#endif
5712 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5713 }
5714
5715 /* Executing non-executable memory? */
5716 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5717 && (Walk.fEffective & X86_PTE_PAE_NX)
5718 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5719 {
5720 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5721 *pGCPhysMem = NIL_RTGCPHYS;
5722#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5723 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5724 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5725#endif
5726 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5727 VERR_ACCESS_DENIED);
5728 }
5729 }
5730
5731 /*
5732 * Set the dirty / access flags.
5733 * ASSUMES this is set when the address is translated rather than on commit...
5734 */
5735 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5736 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5737 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5738 {
5739 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5740 AssertRC(rc2);
5741 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5742 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5743 }
5744
5745 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5746 *pGCPhysMem = GCPhys;
5747 return VINF_SUCCESS;
5748}
5749
5750#if 0 /*unused*/
5751/**
5752 * Looks up a memory mapping entry.
5753 *
5754 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5756 * @param pvMem The memory address.
5757 * @param fAccess The access to.
5758 */
5759DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5760{
5761 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5762 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5763 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5764 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5765 return 0;
5766 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5767 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5768 return 1;
5769 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5770 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5771 return 2;
5772 return VERR_NOT_FOUND;
5773}
5774#endif
5775
5776/**
5777 * Finds a free memmap entry when using iNextMapping doesn't work.
5778 *
5779 * @returns Memory mapping index, 1024 on failure.
5780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5781 */
5782static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5783{
5784 /*
5785 * The easy case.
5786 */
5787 if (pVCpu->iem.s.cActiveMappings == 0)
5788 {
5789 pVCpu->iem.s.iNextMapping = 1;
5790 return 0;
5791 }
5792
5793 /* There should be enough mappings for all instructions. */
5794 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5795
5796 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5797 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5798 return i;
5799
5800 AssertFailedReturn(1024);
5801}
5802
5803
5804/**
5805 * Commits a bounce buffer that needs writing back and unmaps it.
5806 *
5807 * @returns Strict VBox status code.
5808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5809 * @param iMemMap The index of the buffer to commit.
5810 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5811 * Always false in ring-3, obviously.
5812 */
5813static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5814{
5815 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5816 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5817#ifdef IN_RING3
5818 Assert(!fPostponeFail);
5819 RT_NOREF_PV(fPostponeFail);
5820#endif
5821
5822 /*
5823 * Do the writing.
5824 */
5825 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5826 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5827 {
5828 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5829 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5830 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5831 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5832 {
5833 /*
5834 * Carefully and efficiently dealing with access handler return
5835 * codes makes this a little bloated.
5836 */
5837 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5838 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5839 pbBuf,
5840 cbFirst,
5841 PGMACCESSORIGIN_IEM);
5842 if (rcStrict == VINF_SUCCESS)
5843 {
5844 if (cbSecond)
5845 {
5846 rcStrict = PGMPhysWrite(pVM,
5847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5848 pbBuf + cbFirst,
5849 cbSecond,
5850 PGMACCESSORIGIN_IEM);
5851 if (rcStrict == VINF_SUCCESS)
5852 { /* nothing */ }
5853 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5854 {
5855 LogEx(LOG_GROUP_IEM,
5856 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5857 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5858 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5859 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5860 }
5861#ifndef IN_RING3
5862 else if (fPostponeFail)
5863 {
5864 LogEx(LOG_GROUP_IEM,
5865 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5866 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5867 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5868 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5869 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5870 return iemSetPassUpStatus(pVCpu, rcStrict);
5871 }
5872#endif
5873 else
5874 {
5875 LogEx(LOG_GROUP_IEM,
5876 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5877 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5878 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5879 return rcStrict;
5880 }
5881 }
5882 }
5883 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5884 {
5885 if (!cbSecond)
5886 {
5887 LogEx(LOG_GROUP_IEM,
5888 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5889 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5890 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5891 }
5892 else
5893 {
5894 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5895 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5896 pbBuf + cbFirst,
5897 cbSecond,
5898 PGMACCESSORIGIN_IEM);
5899 if (rcStrict2 == VINF_SUCCESS)
5900 {
5901 LogEx(LOG_GROUP_IEM,
5902 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5903 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5904 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5905 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5906 }
5907 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5908 {
5909 LogEx(LOG_GROUP_IEM,
5910 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5911 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5912 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5913 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5914 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5915 }
5916#ifndef IN_RING3
5917 else if (fPostponeFail)
5918 {
5919 LogEx(LOG_GROUP_IEM,
5920 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5922 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5923 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5924 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5925 return iemSetPassUpStatus(pVCpu, rcStrict);
5926 }
5927#endif
5928 else
5929 {
5930 LogEx(LOG_GROUP_IEM,
5931 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5932 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5933 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5934 return rcStrict2;
5935 }
5936 }
5937 }
5938#ifndef IN_RING3
5939 else if (fPostponeFail)
5940 {
5941 LogEx(LOG_GROUP_IEM,
5942 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5943 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5944 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5945 if (!cbSecond)
5946 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5947 else
5948 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5949 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5950 return iemSetPassUpStatus(pVCpu, rcStrict);
5951 }
5952#endif
5953 else
5954 {
5955 LogEx(LOG_GROUP_IEM,
5956 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5957 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5958 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5959 return rcStrict;
5960 }
5961 }
5962 else
5963 {
5964 /*
5965 * No access handlers, much simpler.
5966 */
5967 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5968 if (RT_SUCCESS(rc))
5969 {
5970 if (cbSecond)
5971 {
5972 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5973 if (RT_SUCCESS(rc))
5974 { /* likely */ }
5975 else
5976 {
5977 LogEx(LOG_GROUP_IEM,
5978 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5979 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5980 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5981 return rc;
5982 }
5983 }
5984 }
5985 else
5986 {
5987 LogEx(LOG_GROUP_IEM,
5988 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5989 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5990 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5991 return rc;
5992 }
5993 }
5994 }
5995
5996#if defined(IEM_LOG_MEMORY_WRITES)
5997 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5998 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5999 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6000 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6001 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6002 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6003
6004 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6005 g_cbIemWrote = cbWrote;
6006 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6007#endif
6008
6009 /*
6010 * Free the mapping entry.
6011 */
6012 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6013 Assert(pVCpu->iem.s.cActiveMappings != 0);
6014 pVCpu->iem.s.cActiveMappings--;
6015 return VINF_SUCCESS;
6016}
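/*
 * Note on the fPostponeFail path above: outside ring-3 a failing PGMPhysWrite does not fail the
 * commit. The affected half is marked IEM_ACCESS_PENDING_R3_WRITE_1ST/2ND, VMCPU_FF_IEM is set,
 * the mapping entry is left active, and the instruction is allowed to retire; the pending write
 * is then redone from ring-3 (see iemMemCommitAndUnmapPostponeTroubleToR3 further down for the
 * entry point that requests this behaviour).
 */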
6017
6018
6019/**
6020 * iemMemMap worker that deals with a request crossing pages.
6021 */
6022static VBOXSTRICTRC
6023iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6024 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6025{
6026 Assert(cbMem <= GUEST_PAGE_SIZE);
6027
6028 /*
6029 * Do the address translations.
6030 */
6031 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6032 RTGCPHYS GCPhysFirst;
6033 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6034 if (rcStrict != VINF_SUCCESS)
6035 return rcStrict;
6036 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6037
6038 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6039 RTGCPHYS GCPhysSecond;
6040 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6041 cbSecondPage, fAccess, &GCPhysSecond);
6042 if (rcStrict != VINF_SUCCESS)
6043 return rcStrict;
6044 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6045 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6046
6047 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6048
6049 /*
6050 * Read in the current memory content if it's a read, execute or partial
6051 * write access.
6052 */
6053 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6054
6055 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6056 {
6057 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6058 {
6059 /*
6060 * Must carefully deal with access handler status codes here,
6061 * makes the code a bit bloated.
6062 */
6063 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6064 if (rcStrict == VINF_SUCCESS)
6065 {
6066 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6067 if (rcStrict == VINF_SUCCESS)
6068 { /*likely */ }
6069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6070 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6071 else
6072 {
6073 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6074 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6075 return rcStrict;
6076 }
6077 }
6078 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6079 {
6080 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6081 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6082 {
6083 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6084 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6085 }
6086 else
6087 {
6088 LogEx(LOG_GROUP_IEM,
6089 ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6090 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6091 return rcStrict2;
6092 }
6093 }
6094 else
6095 {
6096 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6097 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6098 return rcStrict;
6099 }
6100 }
6101 else
6102 {
6103 /*
6104 * No informational status codes here, much more straightforward.
6105 */
6106 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6107 if (RT_SUCCESS(rc))
6108 {
6109 Assert(rc == VINF_SUCCESS);
6110 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6111 if (RT_SUCCESS(rc))
6112 Assert(rc == VINF_SUCCESS);
6113 else
6114 {
6115 LogEx(LOG_GROUP_IEM,
6116 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6117 return rc;
6118 }
6119 }
6120 else
6121 {
6122 LogEx(LOG_GROUP_IEM,
6123 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6124 return rc;
6125 }
6126 }
6127 }
6128#ifdef VBOX_STRICT
6129 else
6130 memset(pbBuf, 0xcc, cbMem);
6131 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6132 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6133#endif
6134 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6135
6136 /*
6137 * Commit the bounce buffer entry.
6138 */
6139 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6140 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6141 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6142 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6143 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6144 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6145 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6146 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6147 pVCpu->iem.s.cActiveMappings++;
6148
6149 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6150 *ppvMem = pbBuf;
6151 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6152 return VINF_SUCCESS;
6153}
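/*
 * Worked example for the split above (assuming the usual 4 KiB GUEST_PAGE_SIZE): an 8 byte access
 * at GCPtrFirst=0x7ffe gives cbFirstPage = 0x1000 - 0xffe = 2 and cbSecondPage = 8 - 2 = 6. The
 * two halves are read into / written from the bounce buffer back to back, so the caller still
 * sees a single contiguous mapping of cbMem bytes.
 */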
6154
6155
6156/**
6157 * iemMemMap worker that deals with iemMemPageMap failures.
6158 */
6159static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6160 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6161{
6162 /*
6163 * Filter out conditions we can handle and the ones which shouldn't happen.
6164 */
6165 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6166 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6167 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6168 {
6169 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6170 return rcMap;
6171 }
6172 pVCpu->iem.s.cPotentialExits++;
6173
6174 /*
6175 * Read in the current memory content if it's a read, execute or partial
6176 * write access.
6177 */
6178 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6179 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6180 {
6181 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6182 memset(pbBuf, 0xff, cbMem);
6183 else
6184 {
6185 int rc;
6186 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6187 {
6188 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6189 if (rcStrict == VINF_SUCCESS)
6190 { /* nothing */ }
6191 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6192 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6193 else
6194 {
6195 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6196 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6197 return rcStrict;
6198 }
6199 }
6200 else
6201 {
6202 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6203 if (RT_SUCCESS(rc))
6204 { /* likely */ }
6205 else
6206 {
6207 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6208 GCPhysFirst, rc));
6209 return rc;
6210 }
6211 }
6212 }
6213 }
6214#ifdef VBOX_STRICT
6215 else
6216 memset(pbBuf, 0xcc, cbMem);
6217#endif
6218#ifdef VBOX_STRICT
6219 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6220 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6221#endif
6222
6223 /*
6224 * Commit the bounce buffer entry.
6225 */
6226 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6227 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6228 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6229 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6230 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6231 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6232 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6233 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6234 pVCpu->iem.s.cActiveMappings++;
6235
6236 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6237 *ppvMem = pbBuf;
6238 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6239 return VINF_SUCCESS;
6240}
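/*
 * Summary of the rcMap cases accepted above: VERR_PGM_PHYS_TLB_UNASSIGNED makes reads return all
 * 0xff bytes without going back to PGM, while VERR_PGM_PHYS_TLB_CATCH_WRITE and
 * VERR_PGM_PHYS_TLB_CATCH_ALL simply force the access through the bounce buffer so any installed
 * access handlers are honoured when the write is committed. Everything else is a hard failure.
 */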
6241
6242
6243
6244/**
6245 * Maps the specified guest memory for the given kind of access.
6246 *
6247 * This may be using bounce buffering of the memory if it's crossing a page
6248 * boundary or if there is an access handler installed for any of it. Because
6249 * of lock prefix guarantees, we're in for some extra clutter when this
6250 * happens.
6251 *
6252 * This may raise a \#GP, \#SS, \#PF or \#AC.
6253 *
6254 * @returns VBox strict status code.
6255 *
6256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6257 * @param ppvMem Where to return the pointer to the mapped memory.
6258 * @param pbUnmapInfo Where to return unmap info to be passed to
6259 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6260 * done.
6261 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6262 * 8, 12, 16, 32 or 512. When used by string operations
6263 * it can be up to a page.
6264 * @param iSegReg The index of the segment register to use for this
6265 * access. The base and limits are checked. Use UINT8_MAX
6266 * to indicate that no segmentation is required (for IDT,
6267 * GDT and LDT accesses).
6268 * @param GCPtrMem The address of the guest memory.
6269 * @param fAccess How the memory is being accessed. The
6270 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6271 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6272 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6273 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6274 * set.
6275 * @param uAlignCtl Alignment control:
6276 * - Bits 15:0 is the alignment mask.
6277 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6278 * IEM_MEMMAP_F_ALIGN_SSE, and
6279 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6280 * Pass zero to skip alignment.
6281 */
6282VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6283 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6284{
6285 /*
6286 * Check the input and figure out which mapping entry to use.
6287 */
6288 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6289 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6290 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6291 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6292 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6293
6294 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6295 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6296 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6297 {
6298 iMemMap = iemMemMapFindFree(pVCpu);
6299 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6300 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6301 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6302 pVCpu->iem.s.aMemMappings[2].fAccess),
6303 VERR_IEM_IPE_9);
6304 }
6305
6306 /*
6307 * Map the memory, checking that we can actually access it. If something
6308 * slightly complicated happens, fall back on bounce buffering.
6309 */
6310 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6311 if (rcStrict == VINF_SUCCESS)
6312 { /* likely */ }
6313 else
6314 return rcStrict;
6315
6316 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6317 { /* likely */ }
6318 else
6319 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6320
6321 /*
6322 * Alignment check.
6323 */
6324 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6325 { /* likelyish */ }
6326 else
6327 {
6328 /* Misaligned access. */
6329 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6330 {
6331 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6332 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6333 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6334 {
6335 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6336
6337 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6338 return iemRaiseAlignmentCheckException(pVCpu);
6339 }
6340 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6341 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6342 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6343 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6344 * that's what FXSAVE does on a 10980xe. */
6345 && iemMemAreAlignmentChecksEnabled(pVCpu))
6346 return iemRaiseAlignmentCheckException(pVCpu);
6347 else
6348 return iemRaiseGeneralProtectionFault0(pVCpu);
6349 }
6350
6351#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6352 /* If the access is atomic there are host platform alignment restrictions
6353 we need to conform with. */
6354 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6355# if defined(RT_ARCH_AMD64)
6356 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6357# elif defined(RT_ARCH_ARM64)
6358 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6359# else
6360# error port me
6361# endif
6362 )
6363 { /* okay */ }
6364 else
6365 {
6366 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6367 pVCpu->iem.s.cMisalignedAtomics += 1;
6368 return VINF_EM_EMULATE_SPLIT_LOCK;
6369 }
6370#endif
6371 }
6372
6373#ifdef IEM_WITH_DATA_TLB
6374 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6375
6376 /*
6377 * Get the TLB entry for this page.
6378 */
6379 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6380 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6381 if (pTlbe->uTag == uTag)
6382 {
6383# ifdef VBOX_WITH_STATISTICS
6384 pVCpu->iem.s.DataTlb.cTlbHits++;
6385# endif
6386 }
6387 else
6388 {
6389 pVCpu->iem.s.DataTlb.cTlbMisses++;
6390 PGMPTWALK Walk;
6391 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6392 if (RT_FAILURE(rc))
6393 {
6394 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6395# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6396 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6397 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6398# endif
6399 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6400 }
6401
6402 Assert(Walk.fSucceeded);
6403 pTlbe->uTag = uTag;
6404 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6405 pTlbe->GCPhys = Walk.GCPhys;
6406 pTlbe->pbMappingR3 = NULL;
6407 }
6408
6409 /*
6410 * Check TLB page table level access flags.
6411 */
6412 /* If the page is either supervisor only or non-writable, we need to do
6413 more careful access checks. */
6414 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6415 {
6416 /* Write to read only memory? */
6417 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6418 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6419 && ( ( IEM_GET_CPL(pVCpu) == 3
6420 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6421 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6422 {
6423 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6424# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6425 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6426 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6427# endif
6428 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6429 }
6430
6431 /* Kernel memory accessed by userland? */
6432 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6433 && IEM_GET_CPL(pVCpu) == 3
6434 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6435 {
6436 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6437# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6438 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6439 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6440# endif
6441 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6442 }
6443 }
6444
6445 /*
6446 * Set the dirty / access flags.
6447 * ASSUMES this is set when the address is translated rather than on commit...
6448 */
6449 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6450 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6451 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6452 {
6453 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6454 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6455 AssertRC(rc2);
6456 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6457 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6458 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6459 }
6460
6461 /*
6462 * Look up the physical page info if necessary.
6463 */
6464 uint8_t *pbMem = NULL;
6465 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6466# ifdef IN_RING3
6467 pbMem = pTlbe->pbMappingR3;
6468# else
6469 pbMem = NULL;
6470# endif
6471 else
6472 {
6473 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6474 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6475 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6476 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6477 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6478 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6479 { /* likely */ }
6480 else
6481 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6482 pTlbe->pbMappingR3 = NULL;
6483 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6484 | IEMTLBE_F_NO_MAPPINGR3
6485 | IEMTLBE_F_PG_NO_READ
6486 | IEMTLBE_F_PG_NO_WRITE
6487 | IEMTLBE_F_PG_UNASSIGNED
6488 | IEMTLBE_F_PG_CODE_PAGE);
6489 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6490 &pbMem, &pTlbe->fFlagsAndPhysRev);
6491 AssertRCReturn(rc, rc);
6492# ifdef IN_RING3
6493 pTlbe->pbMappingR3 = pbMem;
6494# endif
6495 }
6496
6497 /*
6498 * Check the physical page level access and mapping.
6499 */
6500 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6501 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6502 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6503 { /* probably likely */ }
6504 else
6505 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6506 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6507 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6508 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6509 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6510 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6511
6512 if (pbMem)
6513 {
6514 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6515 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6516 fAccess |= IEM_ACCESS_NOT_LOCKED;
6517 }
6518 else
6519 {
6520 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6521 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6522 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6523 if (rcStrict != VINF_SUCCESS)
6524 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6525 }
6526
6527 void * const pvMem = pbMem;
6528
6529 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6530 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6531 if (fAccess & IEM_ACCESS_TYPE_READ)
6532 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6533
6534#else /* !IEM_WITH_DATA_TLB */
6535
6536 RTGCPHYS GCPhysFirst;
6537 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6538 if (rcStrict != VINF_SUCCESS)
6539 return rcStrict;
6540
6541 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6542 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6543 if (fAccess & IEM_ACCESS_TYPE_READ)
6544 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6545
6546 void *pvMem;
6547 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6548 if (rcStrict != VINF_SUCCESS)
6549 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6550
6551#endif /* !IEM_WITH_DATA_TLB */
6552
6553 /*
6554 * Fill in the mapping table entry.
6555 */
6556 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6557 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6558 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6559 pVCpu->iem.s.cActiveMappings += 1;
6560
6561 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6562 *ppvMem = pvMem;
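 /* Unmap info layout (see the AssertCompiles below): bits 2:0 = mapping table index, bit 3 always
    set (so a valid value is never zero), bits 7:4 = the IEM_ACCESS_TYPE_XXX bits, which the
    commit/rollback routines re-check against the recorded fAccess. */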
6563 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6564 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6565 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6566
6567 return VINF_SUCCESS;
6568}
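/*
 * Illustrative sketch (not part of the build) of the usual map / use / commit pattern for the
 * status code flavour above. The helper name iemMemDemoFetchU16 and the choice of a word sized
 * read are made up for the example; the calls themselves follow the declarations in this file.
 *
 * @code
 *  static VBOXSTRICTRC iemMemDemoFetchU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
 *  {
 *      uint8_t          bUnmapInfo;
 *      uint16_t const  *pu16Src;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
 *                                        sizeof(*pu16Src) - 1); // uAlignCtl: natural alignment mask
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu16Dst = *pu16Src;                                 // use the mapping
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);  // commits bounce buffered writes, if any
 *      }
 *      return rcStrict;
 *  }
 * @endcode
 */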
6569
6570
6571/**
6572 * Commits the guest memory if bounce buffered and unmaps it.
6573 *
6574 * @returns Strict VBox status code.
6575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6576 * @param bUnmapInfo Unmap info set by iemMemMap.
6577 */
6578VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6579{
6580 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6581 AssertMsgReturn( (bUnmapInfo & 0x08)
6582 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6583 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6584 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6585 VERR_NOT_FOUND);
6586
6587 /* If it's bounce buffered, we may need to write back the buffer. */
6588 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6589 {
6590 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6591 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6592 }
6593 /* Otherwise unlock it. */
6594 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6595 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6596
6597 /* Free the entry. */
6598 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6599 Assert(pVCpu->iem.s.cActiveMappings != 0);
6600 pVCpu->iem.s.cActiveMappings--;
6601 return VINF_SUCCESS;
6602}
6603
6604
6605/**
6606 * Rolls back the guest memory (conceptually only) and unmaps it.
6607 *
6608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6609 * @param bUnmapInfo Unmap info set by iemMemMap.
6610 */
6611void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6612{
6613 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6614 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6615 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6616 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6617 == ((unsigned)bUnmapInfo >> 4),
6618 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6619
6620 /* Unlock it if necessary. */
6621 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6622 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6623
6624 /* Free the entry. */
6625 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6626 Assert(pVCpu->iem.s.cActiveMappings != 0);
6627 pVCpu->iem.s.cActiveMappings--;
6628}
6629
6630#ifdef IEM_WITH_SETJMP
6631
6632/**
6633 * Maps the specified guest memory for the given kind of access, longjmp on
6634 * error.
6635 *
6636 * This may be using bounce buffering of the memory if it's crossing a page
6637 * boundary or if there is an access handler installed for any of it. Because
6638 * of lock prefix guarantees, we're in for some extra clutter when this
6639 * happens.
6640 *
6641 * This may raise a \#GP, \#SS, \#PF or \#AC.
6642 *
6643 * @returns Pointer to the mapped memory.
6644 *
6645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6646 * @param pbUnmapInfo Where to return unmap info to be passed to
6647 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6648 * iemMemCommitAndUnmapWoSafeJmp,
6649 * iemMemCommitAndUnmapRoSafeJmp,
6650 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6651 * when done.
6652 * @param cbMem The number of bytes to map. This is usually 1,
6653 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6654 * string operations it can be up to a page.
6655 * @param iSegReg The index of the segment register to use for
6656 * this access. The base and limits are checked.
6657 * Use UINT8_MAX to indicate that no segmentation
6658 * is required (for IDT, GDT and LDT accesses).
6659 * @param GCPtrMem The address of the guest memory.
6660 * @param fAccess How the memory is being accessed. The
6661 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6662 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6663 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6664 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6665 * set.
6666 * @param uAlignCtl Alignment control:
6667 * - Bits 15:0 is the alignment mask.
6668 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6669 * IEM_MEMMAP_F_ALIGN_SSE, and
6670 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6671 * Pass zero to skip alignment.
6672 */
6673void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6674 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6675{
6676 /*
6677 * Check the input, check segment access and adjust address
6678 * with segment base.
6679 */
6680 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6681 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6682 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6683
6684 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6685 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6686 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6687
6688 /*
6689 * Alignment check.
6690 */
6691 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6692 { /* likelyish */ }
6693 else
6694 {
6695 /* Misaligned access. */
6696 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6697 {
6698 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6699 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6700 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6701 {
6702 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6703
6704 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6705 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6706 }
6707 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6708 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6709 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6710 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6711 * that's what FXSAVE does on a 10980xe. */
6712 && iemMemAreAlignmentChecksEnabled(pVCpu))
6713 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6714 else
6715 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6716 }
6717
6718#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6719 /* If the access is atomic there are host platform alignment restrictions
6720 we need to conform with. */
6721 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6722# if defined(RT_ARCH_AMD64)
6723 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6724# elif defined(RT_ARCH_ARM64)
6725 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6726# else
6727# error port me
6728# endif
6729 )
6730 { /* okay */ }
6731 else
6732 {
6733 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6734 pVCpu->iem.s.cMisalignedAtomics += 1;
6735 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
6736 }
6737#endif
6738 }
6739
6740 /*
6741 * Figure out which mapping entry to use.
6742 */
6743 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6744 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6745 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6746 {
6747 iMemMap = iemMemMapFindFree(pVCpu);
6748 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6749 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6750 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6751 pVCpu->iem.s.aMemMappings[2].fAccess),
6752 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6753 }
6754
6755 /*
6756 * Crossing a page boundary?
6757 */
6758 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6759 { /* No (likely). */ }
6760 else
6761 {
6762 void *pvMem;
6763 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6764 if (rcStrict == VINF_SUCCESS)
6765 return pvMem;
6766 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6767 }
6768
6769#ifdef IEM_WITH_DATA_TLB
6770 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6771
6772 /*
6773 * Get the TLB entry for this page.
6774 */
6775 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6776 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6777 if (pTlbe->uTag == uTag)
6778 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6779 else
6780 {
6781 pVCpu->iem.s.DataTlb.cTlbMisses++;
6782 PGMPTWALK Walk;
6783 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6784 if (RT_FAILURE(rc))
6785 {
6786 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6787# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6788 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6789 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6790# endif
6791 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6792 }
6793
6794 Assert(Walk.fSucceeded);
6795 pTlbe->uTag = uTag;
6796 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6797 pTlbe->GCPhys = Walk.GCPhys;
6798 pTlbe->pbMappingR3 = NULL;
6799 }
6800
6801 /*
6802 * Check the flags and physical revision.
6803 */
6804 /** @todo make the caller pass these in with fAccess. */
6805 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6806 ? IEMTLBE_F_PT_NO_USER : 0;
6807 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6808 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6809 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6810 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6811 ? IEMTLBE_F_PT_NO_WRITE : 0)
6812 : 0;
6813 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6814 uint8_t *pbMem = NULL;
6815 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6816 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6817# ifdef IN_RING3
6818 pbMem = pTlbe->pbMappingR3;
6819# else
6820 pbMem = NULL;
6821# endif
6822 else
6823 {
6824 /*
6825 * Okay, something isn't quite right or needs refreshing.
6826 */
6827 /* Write to read only memory? */
6828 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6829 {
6830 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6831# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6832 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6833 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6834# endif
6835 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6836 }
6837
6838 /* Kernel memory accessed by userland? */
6839 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6840 {
6841 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6842# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6843 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6844 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6845# endif
6846 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6847 }
6848
6849 /* Set the dirty / access flags.
6850 ASSUMES this is set when the address is translated rather than on commit... */
6851 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6852 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6853 {
6854 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6855 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6856 AssertRC(rc2);
6857 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6858 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6859 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6860 }
6861
6862 /*
6863 * Check if the physical page info needs updating.
6864 */
6865 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6866# ifdef IN_RING3
6867 pbMem = pTlbe->pbMappingR3;
6868# else
6869 pbMem = NULL;
6870# endif
6871 else
6872 {
6873 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6874 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6875 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6876 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6877 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6878 pTlbe->pbMappingR3 = NULL;
6879 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6880 | IEMTLBE_F_NO_MAPPINGR3
6881 | IEMTLBE_F_PG_NO_READ
6882 | IEMTLBE_F_PG_NO_WRITE
6883 | IEMTLBE_F_PG_UNASSIGNED
6884 | IEMTLBE_F_PG_CODE_PAGE);
6885 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6886 &pbMem, &pTlbe->fFlagsAndPhysRev);
6887 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6888# ifdef IN_RING3
6889 pTlbe->pbMappingR3 = pbMem;
6890# endif
6891 }
6892
6893 /*
6894 * Check the physical page level access and mapping.
6895 */
6896 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6897 { /* probably likely */ }
6898 else
6899 {
6900 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6901 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6902 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6903 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6904 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6905 if (rcStrict == VINF_SUCCESS)
6906 return pbMem;
6907 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6908 }
6909 }
6910 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6911
6912 if (pbMem)
6913 {
6914 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6915 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6916 fAccess |= IEM_ACCESS_NOT_LOCKED;
6917 }
6918 else
6919 {
6920 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6921 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6922 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6923 if (rcStrict == VINF_SUCCESS)
6924 {
6925 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6926 return pbMem;
6927 }
6928 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6929 }
6930
6931 void * const pvMem = pbMem;
6932
6933 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6934 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6935 if (fAccess & IEM_ACCESS_TYPE_READ)
6936 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6937
6938#else /* !IEM_WITH_DATA_TLB */
6939
6940
6941 RTGCPHYS GCPhysFirst;
6942 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6943 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6944 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6945
6946 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6947 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6948 if (fAccess & IEM_ACCESS_TYPE_READ)
6949 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6950
6951 void *pvMem;
6952 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6953 if (rcStrict == VINF_SUCCESS)
6954 { /* likely */ }
6955 else
6956 {
6957 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6958 if (rcStrict == VINF_SUCCESS)
6959 return pvMem;
6960 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6961 }
6962
6963#endif /* !IEM_WITH_DATA_TLB */
6964
6965 /*
6966 * Fill in the mapping table entry.
6967 */
6968 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6969 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6970 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6971 pVCpu->iem.s.cActiveMappings++;
6972
6973 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6974
6975 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6976 return pvMem;
6977}
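/*
 * For a usage example of the longjmp flavour see e.g. iemMemFetchDataU128AlignedSseJmp further
 * down: iemMemMapJmp, copy the data, iemMemCommitAndUnmapJmp, with no status code plumbing since
 * all failures longjmp out of the instruction.
 */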
6978
6979
6980/**
6981 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6982 *
6983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6984 * @param bUnmapInfo Unmap info set by iemMemMapJmp.
6986 */
6987void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6988{
6989 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6990 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6991 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6992 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6993 == ((unsigned)bUnmapInfo >> 4),
6994 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6995
6996 /* If it's bounce buffered, we may need to write back the buffer. */
6997 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6998 {
6999 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7000 {
7001 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7002 if (rcStrict == VINF_SUCCESS)
7003 return;
7004 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7005 }
7006 }
7007 /* Otherwise unlock it. */
7008 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7009 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7010
7011 /* Free the entry. */
7012 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7013 Assert(pVCpu->iem.s.cActiveMappings != 0);
7014 pVCpu->iem.s.cActiveMappings--;
7015}
7016
7017
7018/** Fallback for iemMemCommitAndUnmapRwJmp. */
7019void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7020{
7021 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7022 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7023}
7024
7025
7026/** Fallback for iemMemCommitAndUnmapAtJmp. */
7027void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7028{
7029 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7030 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7031}
7032
7033
7034/** Fallback for iemMemCommitAndUnmapWoJmp. */
7035void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7036{
7037 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7038 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7039}
7040
7041
7042/** Fallback for iemMemCommitAndUnmapRoJmp. */
7043void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7044{
7045 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7046 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7047}
7048
7049
7050/** Fallback for iemMemRollbackAndUnmapWo. */
7051void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7052{
7053 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7054 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7055}
7056
7057#endif /* IEM_WITH_SETJMP */
7058
7059#ifndef IN_RING3
7060/**
7061 * Commits the guest memory if bounce buffered and unmaps it. If any bounce
7062 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
7063 *
7064 * Allows the instruction to be completed and retired, while the IEM user will
7065 * return to ring-3 immediately afterwards and do the postponed writes there.
7066 *
7067 * @returns VBox status code (no strict statuses). Caller must check
7068 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7070 * @param bUnmapInfo Unmap info set by iemMemMap.
7072 */
7073VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7074{
7075 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7076 AssertMsgReturn( (bUnmapInfo & 0x08)
7077 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7078 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7079 == ((unsigned)bUnmapInfo >> 4),
7080 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7081 VERR_NOT_FOUND);
7082
7083 /* If it's bounce buffered, we may need to write back the buffer. */
7084 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7085 {
7086 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7087 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7088 }
7089 /* Otherwise unlock it. */
7090 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7091 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7092
7093 /* Free the entry. */
7094 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7095 Assert(pVCpu->iem.s.cActiveMappings != 0);
7096 pVCpu->iem.s.cActiveMappings--;
7097 return VINF_SUCCESS;
7098}
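/*
 * Illustrative sketch (not part of the build) of how a non-ring-3 caller is assumed to follow up
 * on the function above; the surrounding control flow is invented for the example.
 *
 * @code
 *  VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
 *  // ... complete and retire the instruction ...
 *  if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))  // postponed bounce buffer write pending?
 *  {
 *      // Do not repeat string instructions and the like; arrange a return to ring-3 so the
 *      // pending write(s) can be performed there.
 *  }
 * @endcode
 */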
7099#endif
7100
7101
7102/**
7103 * Rolls back mappings, releasing page locks and such.
7104 *
7105 * The caller shall only call this after checking cActiveMappings.
7106 *
7107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7108 */
7109void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7110{
7111 Assert(pVCpu->iem.s.cActiveMappings > 0);
7112
7113 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7114 while (iMemMap-- > 0)
7115 {
7116 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7117 if (fAccess != IEM_ACCESS_INVALID)
7118 {
7119 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7120 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7121 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7122 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7123 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7124 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7125 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7126 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7127 pVCpu->iem.s.cActiveMappings--;
7128 }
7129 }
7130}
7131
7132
7133/*
7134 * Instantiate R/W templates.
7135 */
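/* Each inclusion of IEMAllMemRWTmpl.cpp.h below stamps out the fetch/store helpers (and, while
   TMPL_MEM_WITH_STACK is defined, the stack push/pop ones) for the given TMPL_MEM_TYPE, with
   TMPL_MEM_FN_SUFF forming part of the generated names; the U32 instantiation is assumed to
   yield iemMemFetchDataU32 and friends, in line with the hand-written iemMemFetchDataU32_ZX_U64
   further down. */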
7136#define TMPL_MEM_WITH_STACK
7137
7138#define TMPL_MEM_TYPE uint8_t
7139#define TMPL_MEM_FN_SUFF U8
7140#define TMPL_MEM_FMT_TYPE "%#04x"
7141#define TMPL_MEM_FMT_DESC "byte"
7142#include "IEMAllMemRWTmpl.cpp.h"
7143
7144#define TMPL_MEM_TYPE uint16_t
7145#define TMPL_MEM_FN_SUFF U16
7146#define TMPL_MEM_FMT_TYPE "%#06x"
7147#define TMPL_MEM_FMT_DESC "word"
7148#include "IEMAllMemRWTmpl.cpp.h"
7149
7150#define TMPL_WITH_PUSH_SREG
7151#define TMPL_MEM_TYPE uint32_t
7152#define TMPL_MEM_FN_SUFF U32
7153#define TMPL_MEM_FMT_TYPE "%#010x"
7154#define TMPL_MEM_FMT_DESC "dword"
7155#include "IEMAllMemRWTmpl.cpp.h"
7156#undef TMPL_WITH_PUSH_SREG
7157
7158#define TMPL_MEM_TYPE uint64_t
7159#define TMPL_MEM_FN_SUFF U64
7160#define TMPL_MEM_FMT_TYPE "%#018RX64"
7161#define TMPL_MEM_FMT_DESC "qword"
7162#include "IEMAllMemRWTmpl.cpp.h"
7163
7164#undef TMPL_MEM_WITH_STACK
7165
7166#define TMPL_MEM_TYPE uint64_t
7167#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7168#define TMPL_MEM_FN_SUFF U64AlignedU128
7169#define TMPL_MEM_FMT_TYPE "%#018RX64"
7170#define TMPL_MEM_FMT_DESC "qword"
7171#include "IEMAllMemRWTmpl.cpp.h"
7172
7173/* See IEMAllMemRWTmplInline.cpp.h */
7174#define TMPL_MEM_BY_REF
7175
7176#define TMPL_MEM_TYPE RTFLOAT80U
7177#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7178#define TMPL_MEM_FN_SUFF R80
7179#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7180#define TMPL_MEM_FMT_DESC "tword"
7181#include "IEMAllMemRWTmpl.cpp.h"
7182
7183#define TMPL_MEM_TYPE RTPBCD80U
7184#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7185#define TMPL_MEM_FN_SUFF D80
7186#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7187#define TMPL_MEM_FMT_DESC "tword"
7188#include "IEMAllMemRWTmpl.cpp.h"
7189
7190#define TMPL_MEM_TYPE RTUINT128U
7191#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7192#define TMPL_MEM_FN_SUFF U128
7193#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7194#define TMPL_MEM_FMT_DESC "dqword"
7195#include "IEMAllMemRWTmpl.cpp.h"
7196
7197#define TMPL_MEM_TYPE RTUINT128U
7198#define TMPL_MEM_TYPE_ALIGN 0
7199#define TMPL_MEM_FN_SUFF U128NoAc
7200#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7201#define TMPL_MEM_FMT_DESC "dqword"
7202#include "IEMAllMemRWTmpl.cpp.h"
7203
7204/**
7205 * Fetches a data dword and zero extends it to a qword.
7206 *
7207 * @returns Strict VBox status code.
7208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7209 * @param pu64Dst Where to return the qword.
7210 * @param iSegReg The index of the segment register to use for
7211 * this access. The base and limits are checked.
7212 * @param GCPtrMem The address of the guest memory.
7213 */
7214VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7215{
7216 /* The lazy approach for now... */
7217 uint8_t bUnmapInfo;
7218 uint32_t const *pu32Src;
7219 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7220 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7221 if (rc == VINF_SUCCESS)
7222 {
7223 *pu64Dst = *pu32Src;
7224 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7225 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7226 }
7227 return rc;
7228}
7229
7230
7231#ifdef SOME_UNUSED_FUNCTION
7232/**
7233 * Fetches a data dword and sign extends it to a qword.
7234 *
7235 * @returns Strict VBox status code.
7236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7237 * @param pu64Dst Where to return the sign extended value.
7238 * @param iSegReg The index of the segment register to use for
7239 * this access. The base and limits are checked.
7240 * @param GCPtrMem The address of the guest memory.
7241 */
7242VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7243{
7244 /* The lazy approach for now... */
7245 uint8_t bUnmapInfo;
7246 int32_t const *pi32Src;
7247 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7248 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7249 if (rc == VINF_SUCCESS)
7250 {
7251 *pu64Dst = *pi32Src;
7252 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7253 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7254 }
7255#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7256 else
7257 *pu64Dst = 0;
7258#endif
7259 return rc;
7260}
7261#endif
7262
7263
7264/**
7265 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7266 * related.
7267 *
7268 * Raises \#GP(0) if not aligned.
7269 *
7270 * @returns Strict VBox status code.
7271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7272 * @param pu128Dst Where to return the dqword.
7273 * @param iSegReg The index of the segment register to use for
7274 * this access. The base and limits are checked.
7275 * @param GCPtrMem The address of the guest memory.
7276 */
7277VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7278{
7279 /* The lazy approach for now... */
7280 uint8_t bUnmapInfo;
7281 PCRTUINT128U pu128Src;
7282 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
7283 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7284 if (rc == VINF_SUCCESS)
7285 {
7286 pu128Dst->au64[0] = pu128Src->au64[0];
7287 pu128Dst->au64[1] = pu128Src->au64[1];
7288 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7289 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7290 }
7291 return rc;
7292}
7293
7294
7295#ifdef IEM_WITH_SETJMP
7296/**
7297 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7298 * related, longjmp on error.
7299 *
7300 * Raises \#GP(0) if not aligned.
7301 *
7302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7303 * @param pu128Dst Where to return the dqword.
7304 * @param iSegReg The index of the segment register to use for
7305 * this access. The base and limits are checked.
7306 * @param GCPtrMem The address of the guest memory.
7307 */
7308void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7309 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7310{
7311 /* The lazy approach for now... */
7312 uint8_t bUnmapInfo;
7313 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7314 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7315 pu128Dst->au64[0] = pu128Src->au64[0];
7316 pu128Dst->au64[1] = pu128Src->au64[1];
7317 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7318 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7319}
7320#endif
7321
7322
7323/**
7324 * Fetches a data oword (octo word), generally AVX related.
7325 *
7326 * @returns Strict VBox status code.
7327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7328 * @param pu256Dst Where to return the oword.
7329 * @param iSegReg The index of the segment register to use for
7330 * this access. The base and limits are checked.
7331 * @param GCPtrMem The address of the guest memory.
7332 */
7333VBOXSTRICTRC iemMemFetchDataU256NoAc(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7334{
7335 /* The lazy approach for now... */
7336 uint8_t bUnmapInfo;
7337 PCRTUINT256U pu256Src;
7338 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7339 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7340 if (rc == VINF_SUCCESS)
7341 {
7342 pu256Dst->au64[0] = pu256Src->au64[0];
7343 pu256Dst->au64[1] = pu256Src->au64[1];
7344 pu256Dst->au64[2] = pu256Src->au64[2];
7345 pu256Dst->au64[3] = pu256Src->au64[3];
7346 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7347 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7348 }
7349 return rc;
7350}
7351
7352
7353#ifdef IEM_WITH_SETJMP
7354/**
7355 * Fetches a data qqword (quad qword), generally AVX related, longjmp on error.
7356 *
7357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7358 * @param pu256Dst Where to return the qqword.
7359 * @param iSegReg The index of the segment register to use for
7360 * this access. The base and limits are checked.
7361 * @param GCPtrMem The address of the guest memory.
7362 */
7363void iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7364{
7365 /* The lazy approach for now... */
7366 uint8_t bUnmapInfo;
7367 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7368 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7369 pu256Dst->au64[0] = pu256Src->au64[0];
7370 pu256Dst->au64[1] = pu256Src->au64[1];
7371 pu256Dst->au64[2] = pu256Src->au64[2];
7372 pu256Dst->au64[3] = pu256Src->au64[3];
7373 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7374 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7375}
7376#endif
7377
7378
7379/**
7380 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7381 * related.
7382 *
7383 * Raises \#GP(0) if not aligned.
7384 *
7385 * @returns Strict VBox status code.
7386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7387 * @param pu256Dst Where to return the qqword.
7388 * @param iSegReg The index of the segment register to use for
7389 * this access. The base and limits are checked.
7390 * @param GCPtrMem The address of the guest memory.
7391 */
7392VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7393{
7394 /* The lazy approach for now... */
7395 uint8_t bUnmapInfo;
7396 PCRTUINT256U pu256Src;
7397 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7398 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7399 if (rc == VINF_SUCCESS)
7400 {
7401 pu256Dst->au64[0] = pu256Src->au64[0];
7402 pu256Dst->au64[1] = pu256Src->au64[1];
7403 pu256Dst->au64[2] = pu256Src->au64[2];
7404 pu256Dst->au64[3] = pu256Src->au64[3];
7405 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7406 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7407 }
7408 return rc;
7409}
7410
7411
7412#ifdef IEM_WITH_SETJMP
7413/**
7414 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7415 * related, longjmp on error.
7416 *
7417 * Raises \#GP(0) if not aligned.
7418 *
7419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7420 * @param pu256Dst Where to return the qqword.
7421 * @param iSegReg The index of the segment register to use for
7422 * this access. The base and limits are checked.
7423 * @param GCPtrMem The address of the guest memory.
7424 */
7425void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7426 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7427{
7428 /* The lazy approach for now... */
7429 uint8_t bUnmapInfo;
7430 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7431 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7432 pu256Dst->au64[0] = pu256Src->au64[0];
7433 pu256Dst->au64[1] = pu256Src->au64[1];
7434 pu256Dst->au64[2] = pu256Src->au64[2];
7435 pu256Dst->au64[3] = pu256Src->au64[3];
7436 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7437 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7438}
7439#endif
7440
7441
7442
7443/**
7444 * Fetches a descriptor register (lgdt, lidt).
7445 *
7446 * @returns Strict VBox status code.
7447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7448 * @param pcbLimit Where to return the limit.
7449 * @param pGCPtrBase Where to return the base.
7450 * @param iSegReg The index of the segment register to use for
7451 * this access. The base and limits are checked.
7452 * @param GCPtrMem The address of the guest memory.
7453 * @param enmOpSize The effective operand size.
7454 */
7455VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7456 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7457{
7458 /*
7459 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7460 * little special:
7461 * - The two reads are done separately.
7462 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7463 * - We suspect the 386 to actually commit the limit before the base in
7464 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7465 * don't try to emulate this eccentric behavior, because it's not well
7466 * enough understood and rather hard to trigger.
7467 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7468 */
7469 VBOXSTRICTRC rcStrict;
7470 if (IEM_IS_64BIT_CODE(pVCpu))
7471 {
7472 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7473 if (rcStrict == VINF_SUCCESS)
7474 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7475 }
7476 else
7477 {
7478 uint32_t uTmp = 0; /* (avoid Visual C++ may-be-used-uninitialized warning) */
7479 if (enmOpSize == IEMMODE_32BIT)
7480 {
7481 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7482 {
7483 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7484 if (rcStrict == VINF_SUCCESS)
7485 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7486 }
7487 else
7488 {
7489 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7490 if (rcStrict == VINF_SUCCESS)
7491 {
7492 *pcbLimit = (uint16_t)uTmp;
7493 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7494 }
7495 }
7496 if (rcStrict == VINF_SUCCESS)
7497 *pGCPtrBase = uTmp;
7498 }
7499 else
7500 {
7501 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7502 if (rcStrict == VINF_SUCCESS)
7503 {
7504 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7505 if (rcStrict == VINF_SUCCESS)
7506 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7507 }
7508 }
7509 }
7510 return rcStrict;
7511}
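
/*
 * Worked example (editorial): with IEMMODE_16BIT the function above reads a
 * 16-bit limit followed by a full dword at GCPtrMem + 2, but keeps only the
 * low 24 base bits.  GCPtrEffSrc and rcExample are placeholder names.
 */
#if 0 /* example only */
    uint16_t cbLimit;
    RTGCPTR  GCPtrBase;
    VBOXSTRICTRC rcExample = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, pVCpu->iem.s.iEffSeg,
                                                 GCPtrEffSrc, IEMMODE_16BIT);
    /* A dword of 0x12345678 at GCPtrEffSrc + 2 yields GCPtrBase == 0x00345678. */
#endif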
7512
7513
7514/**
7515 * Stores a data dqword, SSE aligned.
7516 *
7517 * @returns Strict VBox status code.
7518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7519 * @param iSegReg The index of the segment register to use for
7520 * this access. The base and limits are checked.
7521 * @param GCPtrMem The address of the guest memory.
7522 * @param u128Value The value to store.
7523 */
7524VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7525{
7526 /* The lazy approach for now... */
7527 uint8_t bUnmapInfo;
7528 PRTUINT128U pu128Dst;
7529 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7530 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7531 if (rc == VINF_SUCCESS)
7532 {
7533 pu128Dst->au64[0] = u128Value.au64[0];
7534 pu128Dst->au64[1] = u128Value.au64[1];
7535 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7536 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7537 }
7538 return rc;
7539}
7540
7541
7542#ifdef IEM_WITH_SETJMP
7543/**
7544 * Stores a data dqword, SSE aligned, longjmp on error.
7545 *
7546 * Raises \#GP(0) if not aligned.
7547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7548 * @param iSegReg The index of the segment register to use for
7549 * this access. The base and limits are checked.
7550 * @param GCPtrMem The address of the guest memory.
7551 * @param u128Value The value to store.
7552 */
7553void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7554 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7555{
7556 /* The lazy approach for now... */
7557 uint8_t bUnmapInfo;
7558 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7559 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7560 pu128Dst->au64[0] = u128Value.au64[0];
7561 pu128Dst->au64[1] = u128Value.au64[1];
7562 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7563 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7564}
7565#endif
7566
7567
7568/**
7569 * Stores a data qqword.
7570 *
7571 * @returns Strict VBox status code.
7572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7573 * @param iSegReg The index of the segment register to use for
7574 * this access. The base and limits are checked.
7575 * @param GCPtrMem The address of the guest memory.
7576 * @param pu256Value Pointer to the value to store.
7577 */
7578VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7579{
7580 /* The lazy approach for now... */
7581 uint8_t bUnmapInfo;
7582 PRTUINT256U pu256Dst;
7583 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7584 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7585 if (rc == VINF_SUCCESS)
7586 {
7587 pu256Dst->au64[0] = pu256Value->au64[0];
7588 pu256Dst->au64[1] = pu256Value->au64[1];
7589 pu256Dst->au64[2] = pu256Value->au64[2];
7590 pu256Dst->au64[3] = pu256Value->au64[3];
7591 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7592 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7593 }
7594 return rc;
7595}
7596
7597
7598#ifdef IEM_WITH_SETJMP
7599/**
7600 * Stores a data qqword, longjmp on error.
7601 *
7602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7603 * @param iSegReg The index of the segment register to use for
7604 * this access. The base and limits are checked.
7605 * @param GCPtrMem The address of the guest memory.
7606 * @param pu256Value Pointer to the value to store.
7607 */
7608void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7609{
7610 /* The lazy approach for now... */
7611 uint8_t bUnmapInfo;
7612 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7613 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7614 pu256Dst->au64[0] = pu256Value->au64[0];
7615 pu256Dst->au64[1] = pu256Value->au64[1];
7616 pu256Dst->au64[2] = pu256Value->au64[2];
7617 pu256Dst->au64[3] = pu256Value->au64[3];
7618 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7619 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7620}
7621#endif
7622
7623
7624/**
7625 * Stores a data qqword.
7626 *
7627 * @returns Strict VBox status code.
7628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7629 * @param iSegReg The index of the segment register to use for
7630 * this access. The base and limits are checked.
7631 * @param GCPtrMem The address of the guest memory.
7632 * @param pu256Value Pointer to the value to store.
7633 */
7634VBOXSTRICTRC iemMemStoreDataU256NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7635{
7636 /* The lazy approach for now... */
7637 uint8_t bUnmapInfo;
7638 PRTUINT256U pu256Dst;
7639 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7640 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7641 if (rc == VINF_SUCCESS)
7642 {
7643 pu256Dst->au64[0] = pu256Value->au64[0];
7644 pu256Dst->au64[1] = pu256Value->au64[1];
7645 pu256Dst->au64[2] = pu256Value->au64[2];
7646 pu256Dst->au64[3] = pu256Value->au64[3];
7647 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7648 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7649 }
7650 return rc;
7651}
7652
7653
7654#ifdef IEM_WITH_SETJMP
7655/**
7656 * Stores a data qqword, longjmp on error.
7657 *
7658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7659 * @param iSegReg The index of the segment register to use for
7660 * this access. The base and limits are checked.
7661 * @param GCPtrMem The address of the guest memory.
7662 * @param pu256Value Pointer to the value to store.
7663 */
7664void iemMemStoreDataU256NoAcJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7665{
7666 /* The lazy approach for now... */
7667 uint8_t bUnmapInfo;
7668 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7669 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7670 pu256Dst->au64[0] = pu256Value->au64[0];
7671 pu256Dst->au64[1] = pu256Value->au64[1];
7672 pu256Dst->au64[2] = pu256Value->au64[2];
7673 pu256Dst->au64[3] = pu256Value->au64[3];
7674 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7675 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7676}
7677#endif
7678
7679
7680/**
7681 * Stores a data qqword, AVX \#GP(0) aligned.
7682 *
7683 * @returns Strict VBox status code.
7684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7685 * @param iSegReg The index of the segment register to use for
7686 * this access. The base and limits are checked.
7687 * @param GCPtrMem The address of the guest memory.
7688 * @param pu256Value Pointer to the value to store.
7689 */
7690VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7691{
7692 /* The lazy approach for now... */
7693 uint8_t bUnmapInfo;
7694 PRTUINT256U pu256Dst;
7695 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7696 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7697 if (rc == VINF_SUCCESS)
7698 {
7699 pu256Dst->au64[0] = pu256Value->au64[0];
7700 pu256Dst->au64[1] = pu256Value->au64[1];
7701 pu256Dst->au64[2] = pu256Value->au64[2];
7702 pu256Dst->au64[3] = pu256Value->au64[3];
7703 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7704 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7705 }
7706 return rc;
7707}
7708
7709
7710#ifdef IEM_WITH_SETJMP
7711/**
7712 * Stores a data qqword, AVX aligned, longjmp on error.
7713 *
7714 * Raises \#GP(0) if not aligned.
7715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7716 * @param iSegReg The index of the segment register to use for
7717 * this access. The base and limits are checked.
7718 * @param GCPtrMem The address of the guest memory.
7719 * @param pu256Value Pointer to the value to store.
7720 */
7721void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7722 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7723{
7724 /* The lazy approach for now... */
7725 uint8_t bUnmapInfo;
7726 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7727 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7728 pu256Dst->au64[0] = pu256Value->au64[0];
7729 pu256Dst->au64[1] = pu256Value->au64[1];
7730 pu256Dst->au64[2] = pu256Value->au64[2];
7731 pu256Dst->au64[3] = pu256Value->au64[3];
7732 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7733 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7734}
7735#endif
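
/*
 * Editorial note: the SSE-aligned helpers above request both
 * IEM_MEMMAP_F_ALIGN_GP and IEM_MEMMAP_F_ALIGN_SSE together with the
 * natural-size mask, while the AVX-aligned store requests only
 * IEM_MEMMAP_F_ALIGN_GP.  The extra SSE flag presumably lets the mapper apply
 * SSE-specific alignment handling (e.g. misaligned-SSE relaxations); the
 * snippet below merely restates the two flag combinations used above.
 */
#if 0 /* example only */
    uint32_t const fAlignSse = (sizeof(RTUINT128U) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE;
    uint32_t const fAlignAvx = (sizeof(RTUINT256U) - 1) | IEM_MEMMAP_F_ALIGN_GP;
#endif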
7736
7737
7738/**
7739 * Stores a descriptor register (sgdt, sidt).
7740 *
7741 * @returns Strict VBox status code.
7742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7743 * @param cbLimit The limit.
7744 * @param GCPtrBase The base address.
7745 * @param iSegReg The index of the segment register to use for
7746 * this access. The base and limits are checked.
7747 * @param GCPtrMem The address of the guest memory.
7748 */
7749VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7750{
7751 /*
7752 * The SIDT and SGDT instructions actually store the data using two
7753 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7754 * do not respond to operand size prefixes.
7755 */
7756 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7757 if (rcStrict == VINF_SUCCESS)
7758 {
7759 if (IEM_IS_16BIT_CODE(pVCpu))
7760 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7761 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7762 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7763 else if (IEM_IS_32BIT_CODE(pVCpu))
7764 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7765 else
7766 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7767 }
7768 return rcStrict;
7769}
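
/*
 * Worked example (editorial): in 16-bit code on a 286-or-older target the
 * base dword stored at GCPtrMem + 2 has its top byte forced to 0xff, so a
 * base of 0x00345678 is written back as 0xff345678; 386+ targets store the
 * 32-bit base unmodified.  GCPtrEffDst and rcExample are placeholder names.
 */
#if 0 /* example only */
    VBOXSTRICTRC rcExample = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt,
                                                 pVCpu->cpum.GstCtx.gdtr.pGdt,
                                                 pVCpu->iem.s.iEffSeg, GCPtrEffDst);
#endif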
7770
7771
7772/**
7773 * Begin a special stack push (used by interrupt, exceptions and such).
7774 *
7775 * This will raise \#SS or \#PF if appropriate.
7776 *
7777 * @returns Strict VBox status code.
7778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7779 * @param cbMem The number of bytes to push onto the stack.
7780 * @param cbAlign The alignment mask (7, 3, 1).
7781 * @param ppvMem Where to return the pointer to the stack memory.
7782 * As with the other memory functions this could be
7783 * direct access or bounce buffered access, so
7784 * don't commit any register changes until the commit call
7785 * succeeds.
7786 * @param pbUnmapInfo Where to store unmap info for
7787 * iemMemStackPushCommitSpecial.
7788 * @param puNewRsp Where to return the new RSP value. This must be
7789 * passed unchanged to
7790 * iemMemStackPushCommitSpecial().
7791 */
7792VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7793 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7794{
7795 Assert(cbMem < UINT8_MAX);
7796 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7797 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7798}
7799
7800
7801/**
7802 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7803 *
7804 * This will update the rSP.
7805 *
7806 * @returns Strict VBox status code.
7807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7808 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7809 * @param uNewRsp The new RSP value returned by
7810 * iemMemStackPushBeginSpecial().
7811 */
7812VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7813{
7814 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7815 if (rcStrict == VINF_SUCCESS)
7816 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7817 return rcStrict;
7818}
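
/*
 * Editorial usage sketch: the special push pair brackets an exception or
 * interrupt frame write.  The sizes below are placeholders; the point is that
 * RSP is only committed after the mapped memory has been written and
 * successfully unmapped.
 */
#if 0 /* example only */
    void    *pvFrame;
    uint8_t  bUnmapInfo;
    uint64_t uNewRsp;
    VBOXSTRICTRC rcExample = iemMemStackPushBeginSpecial(pVCpu, 6 /*cbMem*/, 1 /*cbAlign*/,
                                                         &pvFrame, &bUnmapInfo, &uNewRsp);
    if (rcExample == VINF_SUCCESS)
    {
        /* ... write the frame into pvFrame ... */
        rcExample = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); /* commits RSP on success */
    }
#endif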
7819
7820
7821/**
7822 * Begin a special stack pop (used by iret, retf and such).
7823 *
7824 * This will raise \#SS or \#PF if appropriate.
7825 *
7826 * @returns Strict VBox status code.
7827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7828 * @param cbMem The number of bytes to pop from the stack.
7829 * @param cbAlign The alignment mask (7, 3, 1).
7830 * @param ppvMem Where to return the pointer to the stack memory.
7831 * @param pbUnmapInfo Where to store unmap info for
7832 * iemMemStackPopDoneSpecial.
7833 * @param puNewRsp Where to return the new RSP value. This must be
7834 * assigned to CPUMCTX::rsp manually some time
7835 * after iemMemStackPopDoneSpecial() has been
7836 * called.
7837 */
7838VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7839 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7840{
7841 Assert(cbMem < UINT8_MAX);
7842 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7843 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7844}
7845
7846
7847/**
7848 * Continue a special stack pop (used by iret and retf), for the purpose of
7849 * retrieving a new stack pointer.
7850 *
7851 * This will raise \#SS or \#PF if appropriate.
7852 *
7853 * @returns Strict VBox status code.
7854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7855 * @param off Offset from the top of the stack. This is zero
7856 * except in the retf case.
7857 * @param cbMem The number of bytes to pop from the stack.
7858 * @param ppvMem Where to return the pointer to the stack memory.
7859 * @param pbUnmapInfo Where to store unmap info for
7860 * iemMemStackPopDoneSpecial.
7861 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7862 * return this because all use of this function is
7863 * to retrieve a new value and anything we return
7864 * here would be discarded.)
7865 */
7866VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7867 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7868{
7869 Assert(cbMem < UINT8_MAX);
7870
7871 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7872 RTGCPTR GCPtrTop;
7873 if (IEM_IS_64BIT_CODE(pVCpu))
7874 GCPtrTop = uCurNewRsp;
7875 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7876 GCPtrTop = (uint32_t)uCurNewRsp;
7877 else
7878 GCPtrTop = (uint16_t)uCurNewRsp;
7879
7880 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7881 0 /* checked in iemMemStackPopBeginSpecial */);
7882}
7883
7884
7885/**
7886 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7887 * iemMemStackPopContinueSpecial).
7888 *
7889 * The caller will manually commit the rSP.
7890 *
7891 * @returns Strict VBox status code.
7892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7893 * @param bUnmapInfo Unmap information returned by
7894 * iemMemStackPopBeginSpecial() or
7895 * iemMemStackPopContinueSpecial().
7896 */
7897VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7898{
7899 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7900}
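
/*
 * Editorial usage sketch: the begin/done pop pair as an iret/retf style caller
 * might use it.  Note that, unlike the push variant, RSP is committed manually
 * by the caller after iemMemStackPopDoneSpecial() succeeds.  Sizes are
 * placeholders.
 */
#if 0 /* example only */
    void const *pvStack;
    uint8_t     bUnmapInfo;
    uint64_t    uNewRsp;
    VBOXSTRICTRC rcExample = iemMemStackPopBeginSpecial(pVCpu, 8 /*cbMem*/, 7 /*cbAlign*/,
                                                        &pvStack, &bUnmapInfo, &uNewRsp);
    if (rcExample == VINF_SUCCESS)
    {
        /* ... read the popped data from pvStack ... */
        rcExample = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
        if (rcExample == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
    }
#endif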
7901
7902
7903/**
7904 * Fetches a system table byte.
7905 *
7906 * @returns Strict VBox status code.
7907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7908 * @param pbDst Where to return the byte.
7909 * @param iSegReg The index of the segment register to use for
7910 * this access. The base and limits are checked.
7911 * @param GCPtrMem The address of the guest memory.
7912 */
7913VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7914{
7915 /* The lazy approach for now... */
7916 uint8_t bUnmapInfo;
7917 uint8_t const *pbSrc;
7918 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7919 if (rc == VINF_SUCCESS)
7920 {
7921 *pbDst = *pbSrc;
7922 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7923 }
7924 return rc;
7925}
7926
7927
7928/**
7929 * Fetches a system table word.
7930 *
7931 * @returns Strict VBox status code.
7932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7933 * @param pu16Dst Where to return the word.
7934 * @param iSegReg The index of the segment register to use for
7935 * this access. The base and limits are checked.
7936 * @param GCPtrMem The address of the guest memory.
7937 */
7938VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7939{
7940 /* The lazy approach for now... */
7941 uint8_t bUnmapInfo;
7942 uint16_t const *pu16Src;
7943 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7944 if (rc == VINF_SUCCESS)
7945 {
7946 *pu16Dst = *pu16Src;
7947 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7948 }
7949 return rc;
7950}
7951
7952
7953/**
7954 * Fetches a system table dword.
7955 *
7956 * @returns Strict VBox status code.
7957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7958 * @param pu32Dst Where to return the dword.
7959 * @param iSegReg The index of the segment register to use for
7960 * this access. The base and limits are checked.
7961 * @param GCPtrMem The address of the guest memory.
7962 */
7963VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7964{
7965 /* The lazy approach for now... */
7966 uint8_t bUnmapInfo;
7967 uint32_t const *pu32Src;
7968 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7969 if (rc == VINF_SUCCESS)
7970 {
7971 *pu32Dst = *pu32Src;
7972 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7973 }
7974 return rc;
7975}
7976
7977
7978/**
7979 * Fetches a system table qword.
7980 *
7981 * @returns Strict VBox status code.
7982 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7983 * @param pu64Dst Where to return the qword.
7984 * @param iSegReg The index of the segment register to use for
7985 * this access. The base and limits are checked.
7986 * @param GCPtrMem The address of the guest memory.
7987 */
7988VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7989{
7990 /* The lazy approach for now... */
7991 uint8_t bUnmapInfo;
7992 uint64_t const *pu64Src;
7993 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7994 if (rc == VINF_SUCCESS)
7995 {
7996 *pu64Dst = *pu64Src;
7997 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7998 }
7999 return rc;
8000}
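
/*
 * Editorial note: the iemMemFetchSysU8/U16/U32/U64 helpers are typically
 * called with iSegReg = UINT8_MAX and an already-computed linear address, as
 * the descriptor fetcher below does, which appears to mean that no segment
 * register is applied to the access.
 */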
8001
8002
8003/**
8004 * Fetches a descriptor table entry with caller specified error code.
8005 *
8006 * @returns Strict VBox status code.
8007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8008 * @param pDesc Where to return the descriptor table entry.
8009 * @param uSel The selector which table entry to fetch.
8010 * @param uXcpt The exception to raise on table lookup error.
8011 * @param uErrorCode The error code associated with the exception.
8012 */
8013static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8014 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8015{
8016 AssertPtr(pDesc);
8017 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8018
8019 /** @todo did the 286 require all 8 bytes to be accessible? */
8020 /*
8021 * Get the selector table base and check bounds.
8022 */
8023 RTGCPTR GCPtrBase;
8024 if (uSel & X86_SEL_LDT)
8025 {
8026 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8027 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8028 {
8029 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8030 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8031 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8032 uErrorCode, 0);
8033 }
8034
8035 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8036 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8037 }
8038 else
8039 {
8040 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8041 {
8042 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8043 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8044 uErrorCode, 0);
8045 }
8046 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8047 }
8048
8049 /*
8050 * Read the legacy descriptor and maybe the long mode extensions if
8051 * required.
8052 */
8053 VBOXSTRICTRC rcStrict;
8054 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8055 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8056 else
8057 {
8058 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8059 if (rcStrict == VINF_SUCCESS)
8060 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8061 if (rcStrict == VINF_SUCCESS)
8062 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8063 if (rcStrict == VINF_SUCCESS)
8064 pDesc->Legacy.au16[3] = 0;
8065 else
8066 return rcStrict;
8067 }
8068
8069 if (rcStrict == VINF_SUCCESS)
8070 {
8071 if ( !IEM_IS_LONG_MODE(pVCpu)
8072 || pDesc->Legacy.Gen.u1DescType)
8073 pDesc->Long.au64[1] = 0;
8074 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8075 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8076 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8077 else
8078 {
8079 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8080 /** @todo is this the right exception? */
8081 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8082 }
8083 }
8084 return rcStrict;
8085}
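
/*
 * Worked example (editorial): for uSel = 0x002b (GDT, RPL=3) the descriptor
 * offset is uSel & X86_SEL_MASK = 0x28, and the bounds check above uses
 * (uSel | X86_SEL_RPL_LDT) = 0x2f so that all 8 descriptor bytes must fit
 * within gdtr.cbGdt.  In long mode a system descriptor additionally pulls in
 * the upper 8 bytes at offset 0x30, i.e. (uSel | X86_SEL_RPL_LDT) + 1.
 */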
8086
8087
8088/**
8089 * Fetches a descriptor table entry.
8090 *
8091 * @returns Strict VBox status code.
8092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8093 * @param pDesc Where to return the descriptor table entry.
8094 * @param uSel The selector which table entry to fetch.
8095 * @param uXcpt The exception to raise on table lookup error.
8096 */
8097VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8098{
8099 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8100}
8101
8102
8103/**
8104 * Marks the selector descriptor as accessed (only non-system descriptors).
8105 *
8106 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8107 * will therefore skip the limit checks.
8108 *
8109 * @returns Strict VBox status code.
8110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8111 * @param uSel The selector.
8112 */
8113VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8114{
8115 /*
8116 * Get the selector table base and calculate the entry address.
8117 */
8118 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8119 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8120 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8121 GCPtr += uSel & X86_SEL_MASK;
8122
8123 /*
8124 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8125 * ugly stuff to avoid that. This also makes sure the access is atomic and
8126 * more or less removes any question about 8-bit vs 32-bit accesses.
8127 */
8128 VBOXSTRICTRC rcStrict;
8129 uint8_t bUnmapInfo;
8130 uint32_t volatile *pu32;
8131 if ((GCPtr & 3) == 0)
8132 {
8133 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8134 GCPtr += 2 + 2;
8135 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8136 if (rcStrict != VINF_SUCCESS)
8137 return rcStrict;
8138 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8139 }
8140 else
8141 {
8142 /* The misaligned GDT/LDT case, map the whole thing. */
8143 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8144 if (rcStrict != VINF_SUCCESS)
8145 return rcStrict;
8146 switch ((uintptr_t)pu32 & 3)
8147 {
8148 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8149 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8150 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8151 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8152 }
8153 }
8154
8155 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8156}
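
/*
 * Worked example (editorial): the accessed flag is bit 40 of the 8-byte
 * descriptor.  In the aligned case the code maps bytes 4..7 and sets bit 8 of
 * that dword (40 - 32 = 8, i.e. the bit following u8BaseHigh1).  In the
 * misaligned case all 8 bytes are mapped and the bit index is re-based on the
 * mapping's byte alignment, e.g. (uintptr_t)pu32 & 3 == 2 sets bit
 * 40 - 16 = 24 relative to pu32 + 2, which is the same physical bit.
 */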
8157
8158
8159#undef LOG_GROUP
8160#define LOG_GROUP LOG_GROUP_IEM
8161
8162/** @} */
8163
8164/** @name Opcode Helpers.
8165 * @{
8166 */
8167
8168/**
8169 * Calculates the effective address of a ModR/M memory operand.
8170 *
8171 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8172 *
8173 * @return Strict VBox status code.
8174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8175 * @param bRm The ModRM byte.
8176 * @param cbImmAndRspOffset - First byte: The size of any immediate
8177 * following the effective address opcode bytes
8178 * (only for RIP relative addressing).
8179 * - Second byte: RSP displacement (for POP [ESP]).
8180 * @param pGCPtrEff Where to return the effective address.
8181 */
8182VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8183{
8184 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8185# define SET_SS_DEF() \
8186 do \
8187 { \
8188 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8189 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8190 } while (0)
8191
8192 if (!IEM_IS_64BIT_CODE(pVCpu))
8193 {
8194/** @todo Check the effective address size crap! */
8195 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8196 {
8197 uint16_t u16EffAddr;
8198
8199 /* Handle the disp16 form with no registers first. */
8200 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8201 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8202 else
8203 {
8204 /* Get the displacement. */
8205 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8206 {
8207 case 0: u16EffAddr = 0; break;
8208 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8209 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8210 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8211 }
8212
8213 /* Add the base and index registers to the disp. */
8214 switch (bRm & X86_MODRM_RM_MASK)
8215 {
8216 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8217 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8218 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8219 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8220 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8221 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8222 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8223 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8224 }
8225 }
8226
8227 *pGCPtrEff = u16EffAddr;
8228 }
8229 else
8230 {
8231 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8232 uint32_t u32EffAddr;
8233
8234 /* Handle the disp32 form with no registers first. */
8235 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8236 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8237 else
8238 {
8239 /* Get the register (or SIB) value. */
8240 switch ((bRm & X86_MODRM_RM_MASK))
8241 {
8242 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8243 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8244 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8245 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8246 case 4: /* SIB */
8247 {
8248 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8249
8250 /* Get the index and scale it. */
8251 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8252 {
8253 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8254 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8255 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8256 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8257 case 4: u32EffAddr = 0; /*none */ break;
8258 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8259 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8260 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8261 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8262 }
8263 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8264
8265 /* add base */
8266 switch (bSib & X86_SIB_BASE_MASK)
8267 {
8268 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8269 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8270 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8271 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8272 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8273 case 5:
8274 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8275 {
8276 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8277 SET_SS_DEF();
8278 }
8279 else
8280 {
8281 uint32_t u32Disp;
8282 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8283 u32EffAddr += u32Disp;
8284 }
8285 break;
8286 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8287 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8288 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8289 }
8290 break;
8291 }
8292 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8293 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8294 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8295 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8296 }
8297
8298 /* Get and add the displacement. */
8299 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8300 {
8301 case 0:
8302 break;
8303 case 1:
8304 {
8305 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8306 u32EffAddr += i8Disp;
8307 break;
8308 }
8309 case 2:
8310 {
8311 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8312 u32EffAddr += u32Disp;
8313 break;
8314 }
8315 default:
8316 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8317 }
8318
8319 }
8320 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8321 *pGCPtrEff = u32EffAddr;
8322 }
8323 }
8324 else
8325 {
8326 uint64_t u64EffAddr;
8327
8328 /* Handle the rip+disp32 form with no registers first. */
8329 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8330 {
8331 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8332 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8333 }
8334 else
8335 {
8336 /* Get the register (or SIB) value. */
8337 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8338 {
8339 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8340 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8341 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8342 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8343 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8344 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8345 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8346 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8347 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8348 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8349 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8350 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8351 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8352 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8353 /* SIB */
8354 case 4:
8355 case 12:
8356 {
8357 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8358
8359 /* Get the index and scale it. */
8360 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8361 {
8362 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8363 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8364 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8365 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8366 case 4: u64EffAddr = 0; /*none */ break;
8367 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8368 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8369 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8370 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8371 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8372 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8373 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8374 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8375 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8376 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8377 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8378 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8379 }
8380 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8381
8382 /* add base */
8383 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8384 {
8385 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8386 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8387 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8388 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8389 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8390 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8391 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8392 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8393 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8394 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8395 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8396 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8397 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8398 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8399 /* complicated encodings */
8400 case 5:
8401 case 13:
8402 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8403 {
8404 if (!pVCpu->iem.s.uRexB)
8405 {
8406 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8407 SET_SS_DEF();
8408 }
8409 else
8410 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8411 }
8412 else
8413 {
8414 uint32_t u32Disp;
8415 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8416 u64EffAddr += (int32_t)u32Disp;
8417 }
8418 break;
8419 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8420 }
8421 break;
8422 }
8423 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8424 }
8425
8426 /* Get and add the displacement. */
8427 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8428 {
8429 case 0:
8430 break;
8431 case 1:
8432 {
8433 int8_t i8Disp;
8434 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8435 u64EffAddr += i8Disp;
8436 break;
8437 }
8438 case 2:
8439 {
8440 uint32_t u32Disp;
8441 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8442 u64EffAddr += (int32_t)u32Disp;
8443 break;
8444 }
8445 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8446 }
8447
8448 }
8449
8450 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8451 *pGCPtrEff = u64EffAddr;
8452 else
8453 {
8454 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8455 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8456 }
8457 }
8458
8459 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8460 return VINF_SUCCESS;
8461}
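
/*
 * Worked example (editorial): in 32-bit code bRm = 0x44 (mod=1, rm=4)
 * followed by SIB byte 0x98 (scale=2, index=3/EBX, base=0/EAX) and a disp8
 * makes the function above return EAX + (EBX << 2) + (int8_t)disp8, with no
 * SS default since neither EBP nor ESP is involved.
 */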
8462
8463
8464#ifdef IEM_WITH_SETJMP
8465/**
8466 * Calculates the effective address of a ModR/M memory operand.
8467 *
8468 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8469 *
8470 * May longjmp on internal error.
8471 *
8472 * @return The effective address.
8473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8474 * @param bRm The ModRM byte.
8475 * @param cbImmAndRspOffset - First byte: The size of any immediate
8476 * following the effective address opcode bytes
8477 * (only for RIP relative addressing).
8478 * - Second byte: RSP displacement (for POP [ESP]).
8479 */
8480RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8481{
8482 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8483# define SET_SS_DEF() \
8484 do \
8485 { \
8486 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8487 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8488 } while (0)
8489
8490 if (!IEM_IS_64BIT_CODE(pVCpu))
8491 {
8492/** @todo Check the effective address size crap! */
8493 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8494 {
8495 uint16_t u16EffAddr;
8496
8497 /* Handle the disp16 form with no registers first. */
8498 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8499 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8500 else
8501 {
8502 /* Get the displacement. */
8503 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8504 {
8505 case 0: u16EffAddr = 0; break;
8506 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8507 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8508 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8509 }
8510
8511 /* Add the base and index registers to the disp. */
8512 switch (bRm & X86_MODRM_RM_MASK)
8513 {
8514 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8515 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8516 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8517 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8518 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8519 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8520 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8521 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8522 }
8523 }
8524
8525 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8526 return u16EffAddr;
8527 }
8528
8529 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8530 uint32_t u32EffAddr;
8531
8532 /* Handle the disp32 form with no registers first. */
8533 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8534 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8535 else
8536 {
8537 /* Get the register (or SIB) value. */
8538 switch ((bRm & X86_MODRM_RM_MASK))
8539 {
8540 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8541 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8542 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8543 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8544 case 4: /* SIB */
8545 {
8546 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8547
8548 /* Get the index and scale it. */
8549 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8550 {
8551 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8552 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8553 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8554 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8555 case 4: u32EffAddr = 0; /*none */ break;
8556 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8557 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8558 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8559 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8560 }
8561 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8562
8563 /* add base */
8564 switch (bSib & X86_SIB_BASE_MASK)
8565 {
8566 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8567 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8568 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8569 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8570 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8571 case 5:
8572 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8573 {
8574 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8575 SET_SS_DEF();
8576 }
8577 else
8578 {
8579 uint32_t u32Disp;
8580 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8581 u32EffAddr += u32Disp;
8582 }
8583 break;
8584 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8585 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8586 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8587 }
8588 break;
8589 }
8590 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8591 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8592 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8593 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8594 }
8595
8596 /* Get and add the displacement. */
8597 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8598 {
8599 case 0:
8600 break;
8601 case 1:
8602 {
8603 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8604 u32EffAddr += i8Disp;
8605 break;
8606 }
8607 case 2:
8608 {
8609 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8610 u32EffAddr += u32Disp;
8611 break;
8612 }
8613 default:
8614 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8615 }
8616 }
8617
8618 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8619 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8620 return u32EffAddr;
8621 }
8622
8623 uint64_t u64EffAddr;
8624
8625 /* Handle the rip+disp32 form with no registers first. */
8626 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8627 {
8628 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8629 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8630 }
8631 else
8632 {
8633 /* Get the register (or SIB) value. */
8634 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8635 {
8636 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8637 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8638 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8639 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8640 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8641 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8642 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8643 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8644 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8645 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8646 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8647 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8648 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8649 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8650 /* SIB */
8651 case 4:
8652 case 12:
8653 {
8654 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8655
8656 /* Get the index and scale it. */
8657 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8658 {
8659 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8660 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8661 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8662 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8663 case 4: u64EffAddr = 0; /*none */ break;
8664 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8665 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8666 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8667 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8668 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8669 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8670 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8671 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8672 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8673 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8674 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8675 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8676 }
8677 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8678
8679 /* add base */
8680 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8681 {
8682 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8683 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8684 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8685 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8686 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8687 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8688 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8689 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8690 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8691 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8692 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8693 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8694 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8695 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8696 /* complicated encodings */
8697 case 5:
8698 case 13:
8699 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8700 {
8701 if (!pVCpu->iem.s.uRexB)
8702 {
8703 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8704 SET_SS_DEF();
8705 }
8706 else
8707 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8708 }
8709 else
8710 {
8711 uint32_t u32Disp;
8712 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8713 u64EffAddr += (int32_t)u32Disp;
8714 }
8715 break;
8716 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8717 }
8718 break;
8719 }
8720 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8721 }
8722
8723 /* Get and add the displacement. */
8724 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8725 {
8726 case 0:
8727 break;
8728 case 1:
8729 {
8730 int8_t i8Disp;
8731 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8732 u64EffAddr += i8Disp;
8733 break;
8734 }
8735 case 2:
8736 {
8737 uint32_t u32Disp;
8738 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8739 u64EffAddr += (int32_t)u32Disp;
8740 break;
8741 }
8742 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8743 }
8744
8745 }
8746
8747 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8748 {
8749 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8750 return u64EffAddr;
8751 }
8752 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8753 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8754 return u64EffAddr & UINT32_MAX;
8755}
8756#endif /* IEM_WITH_SETJMP */
8757
8758
8759/**
8760 * Calculates the effective address of a ModR/M memory operand, extended version
8761 * for use in the recompilers.
8762 *
8763 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8764 *
8765 * @return Strict VBox status code.
8766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8767 * @param bRm The ModRM byte.
8768 * @param cbImmAndRspOffset - First byte: The size of any immediate
8769 * following the effective address opcode bytes
8770 * (only for RIP relative addressing).
8771 * - Second byte: RSP displacement (for POP [ESP]).
8772 * @param pGCPtrEff Where to return the effective address.
8773 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8774 * SIB byte (bits 39:32).
8775 */
8776VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8777{
8778 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8779# define SET_SS_DEF() \
8780 do \
8781 { \
8782 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8783 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8784 } while (0)
8785
8786 uint64_t uInfo;
8787 if (!IEM_IS_64BIT_CODE(pVCpu))
8788 {
8789/** @todo Check the effective address size crap! */
8790 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8791 {
8792 uint16_t u16EffAddr;
8793
8794 /* Handle the disp16 form with no registers first. */
8795 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8796 {
8797 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8798 uInfo = u16EffAddr;
8799 }
8800 else
8801 {
8802 /* Get the displacement. */
8803 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8804 {
8805 case 0: u16EffAddr = 0; break;
8806 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8807 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8808 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8809 }
8810 uInfo = u16EffAddr;
8811
8812 /* Add the base and index registers to the disp. */
8813 switch (bRm & X86_MODRM_RM_MASK)
8814 {
8815 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8816 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8817 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8818 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8819 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8820 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8821 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8822 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8823 }
8824 }
8825
8826 *pGCPtrEff = u16EffAddr;
8827 }
8828 else
8829 {
8830 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8831 uint32_t u32EffAddr;
8832
8833 /* Handle the disp32 form with no registers first. */
8834 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8835 {
8836 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8837 uInfo = u32EffAddr;
8838 }
8839 else
8840 {
8841 /* Get the register (or SIB) value. */
8842 uInfo = 0;
8843 switch ((bRm & X86_MODRM_RM_MASK))
8844 {
8845 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8846 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8847 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8848 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8849 case 4: /* SIB */
8850 {
8851 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8852 uInfo = (uint64_t)bSib << 32;
8853
8854 /* Get the index and scale it. */
8855 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8856 {
8857 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8858 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8859 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8860 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8861 case 4: u32EffAddr = 0; /*none */ break;
8862 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8863 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8864 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8865 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8866 }
8867 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8868
8869 /* add base */
8870 switch (bSib & X86_SIB_BASE_MASK)
8871 {
8872 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8873 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8874 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8875 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8876 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8877 case 5:
8878 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8879 {
8880 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8881 SET_SS_DEF();
8882 }
8883 else
8884 {
8885 uint32_t u32Disp;
8886 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8887 u32EffAddr += u32Disp;
8888 uInfo |= u32Disp;
8889 }
8890 break;
8891 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8892 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8893 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8894 }
8895 break;
8896 }
8897 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8898 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8899 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8900 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8901 }
8902
8903 /* Get and add the displacement. */
8904 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8905 {
8906 case 0:
8907 break;
8908 case 1:
8909 {
8910 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8911 u32EffAddr += i8Disp;
8912 uInfo |= (uint32_t)(int32_t)i8Disp;
8913 break;
8914 }
8915 case 2:
8916 {
8917 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8918 u32EffAddr += u32Disp;
8919 uInfo |= (uint32_t)u32Disp;
8920 break;
8921 }
8922 default:
8923 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8924 }
8925
8926 }
8927 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8928 *pGCPtrEff = u32EffAddr;
8929 }
8930 }
8931 else
8932 {
8933 uint64_t u64EffAddr;
8934
8935 /* Handle the rip+disp32 form with no registers first. */
8936 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8937 {
8938 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8939 uInfo = (uint32_t)u64EffAddr;
8940 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8941 }
8942 else
8943 {
8944 /* Get the register (or SIB) value. */
8945 uInfo = 0;
8946 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8947 {
8948 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8949 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8950 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8951 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8952 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8953 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8954 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8955 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8956 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8957 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8958 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8959 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8960 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8961 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8962 /* SIB */
8963 case 4:
8964 case 12:
8965 {
8966 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8967 uInfo = (uint64_t)bSib << 32;
8968
8969 /* Get the index and scale it. */
8970 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8971 {
8972 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8973 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8974 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8975 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8976 case 4: u64EffAddr = 0; /*none */ break;
8977 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8978 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8979 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8980 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8981 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8982 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8983 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8984 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8985 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8986 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8987 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8988 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8989 }
8990 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8991
8992 /* add base */
8993 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8994 {
8995 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8996 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8997 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8998 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8999 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9000 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9001 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9002 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9003 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9004 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9005 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9006 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9007 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9008 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9009 /* complicated encodings */
9010 case 5:
9011 case 13:
9012 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9013 {
9014 if (!pVCpu->iem.s.uRexB)
9015 {
9016 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9017 SET_SS_DEF();
9018 }
9019 else
9020 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9021 }
9022 else
9023 {
9024 uint32_t u32Disp;
9025 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9026 u64EffAddr += (int32_t)u32Disp;
9027 uInfo |= u32Disp;
9028 }
9029 break;
9030 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9031 }
9032 break;
9033 }
9034 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9035 }
9036
9037 /* Get and add the displacement. */
9038 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9039 {
9040 case 0:
9041 break;
9042 case 1:
9043 {
9044 int8_t i8Disp;
9045 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9046 u64EffAddr += i8Disp;
9047 uInfo |= (uint32_t)(int32_t)i8Disp;
9048 break;
9049 }
9050 case 2:
9051 {
9052 uint32_t u32Disp;
9053 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9054 u64EffAddr += (int32_t)u32Disp;
9055 uInfo |= u32Disp;
9056 break;
9057 }
9058 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9059 }
9060
9061 }
9062
9063 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9064 *pGCPtrEff = u64EffAddr;
9065 else
9066 {
9067 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9068 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9069 }
9070 }
9071 *puInfo = uInfo;
9072
9073 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9074 return VINF_SUCCESS;
9075}
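
/*
 * Usage sketch (illustrative only, not a call site in these sources; bRm is the
 * ModRM byte the caller has already fetched and the cbImmAndRspOffset value of 1
 * is a hypothetical example): a consumer on the recompiler side unpacks the
 * extra info exactly as documented above, with the displacement in bits 31:0
 * and the SIB byte in bits 39:32.
 *
 *      RTGCPTR      GCPtrEff;
 *      uint64_t     uInfo;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 1, &GCPtrEff, &uInfo);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const u32Disp = (uint32_t)uInfo;        // displacement (bits 31:0)
 *          uint8_t  const bSib    = (uint8_t)(uInfo >> 32); // SIB byte (bits 39:32)
 *      }
 */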
9076
9077/** @} */
9078
9079
9080#ifdef LOG_ENABLED
9081/**
9082 * Logs the current instruction.
9083 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9084 * @param fSameCtx Set if we have the same context information as the VMM,
9085 * clear if we may have already executed an instruction in
9086 * our debug context. When clear, we assume IEMCPU holds
9087 * valid CPU mode info.
9088 *
9089 * The @a fSameCtx parameter is now misleading and obsolete.
9090 * @param pszFunction The IEM function doing the execution.
9091 */
9092static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9093{
9094# ifdef IN_RING3
9095 if (LogIs2Enabled())
9096 {
9097 char szInstr[256];
9098 uint32_t cbInstr = 0;
9099 if (fSameCtx)
9100 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9101 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9102 szInstr, sizeof(szInstr), &cbInstr);
9103 else
9104 {
9105 uint32_t fFlags = 0;
9106 switch (IEM_GET_CPU_MODE(pVCpu))
9107 {
9108 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9109 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9110 case IEMMODE_16BIT:
9111 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9112 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9113 else
9114 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9115 break;
9116 }
9117 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9118 szInstr, sizeof(szInstr), &cbInstr);
9119 }
9120
9121 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9122 Log2(("**** %s fExec=%x\n"
9123 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9124 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9125 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9126 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9127 " %s\n"
9128 , pszFunction, pVCpu->iem.s.fExec,
9129 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9130 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9131 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9132 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9133 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9134 szInstr));
9135
9136 /* This stuff sucks atm. as it fills the log with MSRs. */
9137 //if (LogIs3Enabled())
9138 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9139 }
9140 else
9141# endif
9142 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9143 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9144 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9145}
9146#endif /* LOG_ENABLED */
9147
9148
9149#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9150/**
9151 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9152 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9153 *
9154 * @returns Modified rcStrict.
9155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9156 * @param rcStrict The instruction execution status.
9157 */
9158static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9159{
9160 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9161 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9162 {
9163 /* VMX preemption timer takes priority over NMI-window exits. */
9164 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9165 {
9166 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9167 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9168 }
9169 /*
9170 * Check remaining intercepts.
9171 *
9172 * NMI-window and Interrupt-window VM-exits.
9173 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9174 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9175 *
9176 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9177 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9178 */
9179 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9180 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9181 && !TRPMHasTrap(pVCpu))
9182 {
9183 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9184 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9185 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9186 {
9187 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9188 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9189 }
9190 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9191 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9192 {
9193 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9194 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9195 }
9196 }
9197 }
9198 /* TPR-below threshold/APIC write has the highest priority. */
9199 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9200 {
9201 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9202 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9203 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9204 }
9205 /* MTF takes priority over VMX-preemption timer. */
9206 else
9207 {
9208 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9209 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9210 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9211 }
9212 return rcStrict;
9213}
9214#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9215
9216
9217/**
9218 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9219 * IEMExecOneWithPrefetchedByPC.
9220 *
9221 * Similar code is found in IEMExecLots.
9222 *
9223 * @return Strict VBox status code.
9224 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9225 * @param fExecuteInhibit If set, execute the instruction following CLI,
9226 * POP SS and MOV SS,GR.
9227 * @param pszFunction The calling function name.
9228 */
9229DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9230{
9231 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9232 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9233 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9234 RT_NOREF_PV(pszFunction);
9235
9236#ifdef IEM_WITH_SETJMP
9237 VBOXSTRICTRC rcStrict;
9238 IEM_TRY_SETJMP(pVCpu, rcStrict)
9239 {
9240 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9241 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9242 }
9243 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9244 {
9245 pVCpu->iem.s.cLongJumps++;
9246 }
9247 IEM_CATCH_LONGJMP_END(pVCpu);
9248#else
9249 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9250 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9251#endif
9252 if (rcStrict == VINF_SUCCESS)
9253 pVCpu->iem.s.cInstructions++;
9254 if (pVCpu->iem.s.cActiveMappings > 0)
9255 {
9256 Assert(rcStrict != VINF_SUCCESS);
9257 iemMemRollback(pVCpu);
9258 }
9259 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9260 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9261 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9262
9263//#ifdef DEBUG
9264// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9265//#endif
9266
9267#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9268 /*
9269 * Perform any VMX nested-guest instruction boundary actions.
9270 *
9271 * If any of these causes a VM-exit, we must skip executing the next
9272 * instruction (would run into stale page tables). A VM-exit makes sure
9273 * there is no interrupt-inhibition, so that should ensure we don't try to
9274 * execute the next instruction. Clearing fExecuteInhibit is
9275 * problematic because of the setjmp/longjmp clobbering above.
9276 */
9277 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9278 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9279 || rcStrict != VINF_SUCCESS)
9280 { /* likely */ }
9281 else
9282 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9283#endif
9284
9285 /* Execute the next instruction as well if a cli, pop ss or
9286 mov ss, Gr has just completed successfully. */
9287 if ( fExecuteInhibit
9288 && rcStrict == VINF_SUCCESS
9289 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9290 {
9291 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9292 if (rcStrict == VINF_SUCCESS)
9293 {
9294#ifdef LOG_ENABLED
9295 iemLogCurInstr(pVCpu, false, pszFunction);
9296#endif
9297#ifdef IEM_WITH_SETJMP
9298 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9299 {
9300 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9301 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9302 }
9303 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9304 {
9305 pVCpu->iem.s.cLongJumps++;
9306 }
9307 IEM_CATCH_LONGJMP_END(pVCpu);
9308#else
9309 IEM_OPCODE_GET_FIRST_U8(&b);
9310 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9311#endif
9312 if (rcStrict == VINF_SUCCESS)
9313 {
9314 pVCpu->iem.s.cInstructions++;
9315#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9316 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9317 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9318 { /* likely */ }
9319 else
9320 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9321#endif
9322 }
9323 if (pVCpu->iem.s.cActiveMappings > 0)
9324 {
9325 Assert(rcStrict != VINF_SUCCESS);
9326 iemMemRollback(pVCpu);
9327 }
9328 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9329 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9330 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9331 }
9332 else if (pVCpu->iem.s.cActiveMappings > 0)
9333 iemMemRollback(pVCpu);
9334 /** @todo drop this after we bake this change into RIP advancing. */
9335 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9336 }
9337
9338 /*
9339 * Return value fiddling, statistics and sanity assertions.
9340 */
9341 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9342
9343 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9344 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9345 return rcStrict;
9346}
9347
9348
9349/**
9350 * Execute one instruction.
9351 *
9352 * @return Strict VBox status code.
9353 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9354 */
9355VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9356{
9357 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9358#ifdef LOG_ENABLED
9359 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9360#endif
9361
9362 /*
9363 * Do the decoding and emulation.
9364 */
9365 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9366 if (rcStrict == VINF_SUCCESS)
9367 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9368 else if (pVCpu->iem.s.cActiveMappings > 0)
9369 iemMemRollback(pVCpu);
9370
9371 if (rcStrict != VINF_SUCCESS)
9372 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9373 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9374 return rcStrict;
9375}
9376
9377
9378VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9379{
9380 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9381 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9382 if (rcStrict == VINF_SUCCESS)
9383 {
9384 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9385 if (pcbWritten)
9386 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9387 }
9388 else if (pVCpu->iem.s.cActiveMappings > 0)
9389 iemMemRollback(pVCpu);
9390
9391 return rcStrict;
9392}
9393
9394
9395VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9396 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9397{
9398 VBOXSTRICTRC rcStrict;
9399 if ( cbOpcodeBytes
9400 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9401 {
9402 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9403#ifdef IEM_WITH_CODE_TLB
9404 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9405 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9406 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9407 pVCpu->iem.s.offCurInstrStart = 0;
9408 pVCpu->iem.s.offInstrNextByte = 0;
9409 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9410#else
9411 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9412 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9413#endif
9414 rcStrict = VINF_SUCCESS;
9415 }
9416 else
9417 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9418 if (rcStrict == VINF_SUCCESS)
9419 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9420 else if (pVCpu->iem.s.cActiveMappings > 0)
9421 iemMemRollback(pVCpu);
9422
9423 return rcStrict;
9424}
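
/*
 * Usage sketch (illustrative only; the NOP opcode byte is a hypothetical
 * stand-in): a caller that already holds the opcode bytes for the current RIP,
 * e.g. from an exit record, can pass them in and skip the opcode prefetch:
 *
 *      static uint8_t const s_abNop[] = { 0x90 };
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
 *                                                           s_abNop, sizeof(s_abNop));
 */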
9425
9426
9427VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9428{
9429 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9430 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9431 if (rcStrict == VINF_SUCCESS)
9432 {
9433 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9434 if (pcbWritten)
9435 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9436 }
9437 else if (pVCpu->iem.s.cActiveMappings > 0)
9438 iemMemRollback(pVCpu);
9439
9440 return rcStrict;
9441}
9442
9443
9444VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9445 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9446{
9447 VBOXSTRICTRC rcStrict;
9448 if ( cbOpcodeBytes
9449 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9450 {
9451 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9452#ifdef IEM_WITH_CODE_TLB
9453 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9454 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9455 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9456 pVCpu->iem.s.offCurInstrStart = 0;
9457 pVCpu->iem.s.offInstrNextByte = 0;
9458 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9459#else
9460 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9461 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9462#endif
9463 rcStrict = VINF_SUCCESS;
9464 }
9465 else
9466 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9467 if (rcStrict == VINF_SUCCESS)
9468 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9469 else if (pVCpu->iem.s.cActiveMappings > 0)
9470 iemMemRollback(pVCpu);
9471
9472 return rcStrict;
9473}
9474
9475
9476/**
9477 * For handling split cacheline lock operations when the host has split-lock
9478 * detection enabled.
9479 *
9480 * This will cause the interpreter to disregard the lock prefix and implicit
9481 * locking (xchg).
9482 *
9483 * @returns Strict VBox status code.
9484 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9485 */
9486VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9487{
9488 /*
9489 * Do the decoding and emulation.
9490 */
9491 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9492 if (rcStrict == VINF_SUCCESS)
9493 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9494 else if (pVCpu->iem.s.cActiveMappings > 0)
9495 iemMemRollback(pVCpu);
9496
9497 if (rcStrict != VINF_SUCCESS)
9498 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9499 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9500 return rcStrict;
9501}
9502
9503
9504/**
9505 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9506 * inject a pending TRPM trap.
9507 */
9508VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9509{
9510 Assert(TRPMHasTrap(pVCpu));
9511
9512 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9513 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9514 {
9515 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9516#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9517 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9518 if (fIntrEnabled)
9519 {
9520 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9521 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9522 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9523 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9524 else
9525 {
9526 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9527 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9528 }
9529 }
9530#else
9531 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9532#endif
9533 if (fIntrEnabled)
9534 {
9535 uint8_t u8TrapNo;
9536 TRPMEVENT enmType;
9537 uint32_t uErrCode;
9538 RTGCPTR uCr2;
9539 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9540 AssertRC(rc2);
9541 Assert(enmType == TRPM_HARDWARE_INT);
9542 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9543
9544 TRPMResetTrap(pVCpu);
9545
9546#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9547 /* Injecting an event may cause a VM-exit. */
9548 if ( rcStrict != VINF_SUCCESS
9549 && rcStrict != VINF_IEM_RAISED_XCPT)
9550 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9551#else
9552 NOREF(rcStrict);
9553#endif
9554 }
9555 }
9556
9557 return VINF_SUCCESS;
9558}
9559
9560
9561VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9562{
9563 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9564 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9565 Assert(cMaxInstructions > 0);
9566
9567 /*
9568 * See if there is an interrupt pending in TRPM, inject it if we can.
9569 */
9570 /** @todo What if we are injecting an exception and not an interrupt? Is that
9571 * possible here? For now we assert it is indeed only an interrupt. */
9572 if (!TRPMHasTrap(pVCpu))
9573 { /* likely */ }
9574 else
9575 {
9576 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9577 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9578 { /*likely */ }
9579 else
9580 return rcStrict;
9581 }
9582
9583 /*
9584 * Initial decoder init w/ prefetch, then setup setjmp.
9585 */
9586 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9587 if (rcStrict == VINF_SUCCESS)
9588 {
9589#ifdef IEM_WITH_SETJMP
9590 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9591 IEM_TRY_SETJMP(pVCpu, rcStrict)
9592#endif
9593 {
9594 /*
9595 * The run loop. We limit ourselves to the caller-specified instruction count.
9596 */
9597 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9598 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9599 for (;;)
9600 {
9601 /*
9602 * Log the state.
9603 */
9604#ifdef LOG_ENABLED
9605 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9606#endif
9607
9608 /*
9609 * Do the decoding and emulation.
9610 */
9611 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9612 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9613#ifdef VBOX_STRICT
9614 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9615#endif
9616 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9617 {
9618 Assert(pVCpu->iem.s.cActiveMappings == 0);
9619 pVCpu->iem.s.cInstructions++;
9620
9621#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9622 /* Perform any VMX nested-guest instruction boundary actions. */
9623 uint64_t fCpu = pVCpu->fLocalForcedActions;
9624 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9625 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9626 { /* likely */ }
9627 else
9628 {
9629 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9630 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9631 fCpu = pVCpu->fLocalForcedActions;
9632 else
9633 {
9634 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9635 break;
9636 }
9637 }
9638#endif
9639 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9640 {
9641#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9642 uint64_t fCpu = pVCpu->fLocalForcedActions;
9643#endif
9644 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9645 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9646 | VMCPU_FF_TLB_FLUSH
9647 | VMCPU_FF_UNHALT );
9648
9649 if (RT_LIKELY( ( !fCpu
9650 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9651 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9652 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9653 {
9654 if (--cMaxInstructionsGccStupidity > 0)
9655 {
9656 /* Poll timers every now and then according to the caller's specs. */
9657 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9658 || !TMTimerPollBool(pVM, pVCpu))
9659 {
9660 Assert(pVCpu->iem.s.cActiveMappings == 0);
9661 iemReInitDecoder(pVCpu);
9662 continue;
9663 }
9664 }
9665 }
9666 }
9667 Assert(pVCpu->iem.s.cActiveMappings == 0);
9668 }
9669 else if (pVCpu->iem.s.cActiveMappings > 0)
9670 iemMemRollback(pVCpu);
9671 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9672 break;
9673 }
9674 }
9675#ifdef IEM_WITH_SETJMP
9676 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9677 {
9678 if (pVCpu->iem.s.cActiveMappings > 0)
9679 iemMemRollback(pVCpu);
9680# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9681 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9682# endif
9683 pVCpu->iem.s.cLongJumps++;
9684 }
9685 IEM_CATCH_LONGJMP_END(pVCpu);
9686#endif
9687
9688 /*
9689 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9690 */
9691 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9692 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9693 }
9694 else
9695 {
9696 if (pVCpu->iem.s.cActiveMappings > 0)
9697 iemMemRollback(pVCpu);
9698
9699#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9700 /*
9701 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9702 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9703 */
9704 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9705#endif
9706 }
9707
9708 /*
9709 * Maybe re-enter raw-mode and log.
9710 */
9711 if (rcStrict != VINF_SUCCESS)
9712 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9713 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9714 if (pcInstructions)
9715 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9716 return rcStrict;
9717}
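
/*
 * Usage sketch (illustrative only; the limits are hypothetical): cPollRate is
 * used as a mask in the loop above, which is why the assertion requires
 * cPollRate + 1 to be a power of two.  A value of 511 polls the timers roughly
 * every 512 instructions:
 *
 *      uint32_t cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 511, &cInstructions);
 *      // cMaxInstructions=4096, cPollRate=511; cInstructions returns the count executed.
 */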
9718
9719
9720/**
9721 * Interface used by EMExecuteExec, does exit statistics and limits.
9722 *
9723 * @returns Strict VBox status code.
9724 * @param pVCpu The cross context virtual CPU structure.
9725 * @param fWillExit To be defined.
9726 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9727 * @param cMaxInstructions Maximum number of instructions to execute.
9728 * @param cMaxInstructionsWithoutExits
9729 * The max number of instructions without exits.
9730 * @param pStats Where to return statistics.
9731 */
9732VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9733 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9734{
9735 NOREF(fWillExit); /** @todo define flexible exit crits */
9736
9737 /*
9738 * Initialize return stats.
9739 */
9740 pStats->cInstructions = 0;
9741 pStats->cExits = 0;
9742 pStats->cMaxExitDistance = 0;
9743 pStats->cReserved = 0;
9744
9745 /*
9746 * Initial decoder init w/ prefetch, then setup setjmp.
9747 */
9748 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9749 if (rcStrict == VINF_SUCCESS)
9750 {
9751#ifdef IEM_WITH_SETJMP
9752 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9753 IEM_TRY_SETJMP(pVCpu, rcStrict)
9754#endif
9755 {
9756#ifdef IN_RING0
9757 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9758#endif
9759 uint32_t cInstructionSinceLastExit = 0;
9760
9761 /*
9762 * The run loop. We limit ourselves to the caller-specified instruction count.
9763 */
9764 PVM pVM = pVCpu->CTX_SUFF(pVM);
9765 for (;;)
9766 {
9767 /*
9768 * Log the state.
9769 */
9770#ifdef LOG_ENABLED
9771 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9772#endif
9773
9774 /*
9775 * Do the decoding and emulation.
9776 */
9777 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9778
9779 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9780 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9781
9782 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9783 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9784 {
9785 pStats->cExits += 1;
9786 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9787 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9788 cInstructionSinceLastExit = 0;
9789 }
9790
9791 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9792 {
9793 Assert(pVCpu->iem.s.cActiveMappings == 0);
9794 pVCpu->iem.s.cInstructions++;
9795 pStats->cInstructions++;
9796 cInstructionSinceLastExit++;
9797
9798#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9799 /* Perform any VMX nested-guest instruction boundary actions. */
9800 uint64_t fCpu = pVCpu->fLocalForcedActions;
9801 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9802 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9803 { /* likely */ }
9804 else
9805 {
9806 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9807 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9808 fCpu = pVCpu->fLocalForcedActions;
9809 else
9810 {
9811 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9812 break;
9813 }
9814 }
9815#endif
9816 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9817 {
9818#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9819 uint64_t fCpu = pVCpu->fLocalForcedActions;
9820#endif
9821 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9822 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9823 | VMCPU_FF_TLB_FLUSH
9824 | VMCPU_FF_UNHALT );
9825 if (RT_LIKELY( ( ( !fCpu
9826 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9827 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9828 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9829 || pStats->cInstructions < cMinInstructions))
9830 {
9831 if (pStats->cInstructions < cMaxInstructions)
9832 {
9833 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9834 {
9835#ifdef IN_RING0
9836 if ( !fCheckPreemptionPending
9837 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9838#endif
9839 {
9840 Assert(pVCpu->iem.s.cActiveMappings == 0);
9841 iemReInitDecoder(pVCpu);
9842 continue;
9843 }
9844#ifdef IN_RING0
9845 rcStrict = VINF_EM_RAW_INTERRUPT;
9846 break;
9847#endif
9848 }
9849 }
9850 }
9851 Assert(!(fCpu & VMCPU_FF_IEM));
9852 }
9853 Assert(pVCpu->iem.s.cActiveMappings == 0);
9854 }
9855 else if (pVCpu->iem.s.cActiveMappings > 0)
9856 iemMemRollback(pVCpu);
9857 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9858 break;
9859 }
9860 }
9861#ifdef IEM_WITH_SETJMP
9862 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9863 {
9864 if (pVCpu->iem.s.cActiveMappings > 0)
9865 iemMemRollback(pVCpu);
9866 pVCpu->iem.s.cLongJumps++;
9867 }
9868 IEM_CATCH_LONGJMP_END(pVCpu);
9869#endif
9870
9871 /*
9872 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9873 */
9874 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9875 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9876 }
9877 else
9878 {
9879 if (pVCpu->iem.s.cActiveMappings > 0)
9880 iemMemRollback(pVCpu);
9881
9882#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9883 /*
9884 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9885 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9886 */
9887 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9888#endif
9889 }
9890
9891 /*
9892 * Maybe re-enter raw-mode and log.
9893 */
9894 if (rcStrict != VINF_SUCCESS)
9895 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9896 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9897 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9898 return rcStrict;
9899}
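
/*
 * Usage sketch (illustrative only; all limits are hypothetical and the
 * IEMEXECFOREXITSTATS type name is assumed from the PIEMEXECFOREXITSTATS
 * parameter): the caller supplies the instruction budgets and reads the exit
 * statistics back afterwards:
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 1, 4096, 2048, &Stats);
 *      // fWillExit=0, cMinInstructions=1, cMaxInstructions=4096, cMaxInstructionsWithoutExits=2048
 */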
9900
9901
9902/**
9903 * Injects a trap, fault, abort, software interrupt or external interrupt.
9904 *
9905 * The parameter list matches TRPMQueryTrapAll pretty closely.
9906 *
9907 * @returns Strict VBox status code.
9908 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9909 * @param u8TrapNo The trap number.
9910 * @param enmType What type is it (trap/fault/abort), software
9911 * interrupt or hardware interrupt.
9912 * @param uErrCode The error code if applicable.
9913 * @param uCr2 The CR2 value if applicable.
9914 * @param cbInstr The instruction length (only relevant for
9915 * software interrupts).
9916 */
9917VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9918 uint8_t cbInstr)
9919{
9920 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9921#ifdef DBGFTRACE_ENABLED
9922 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9923 u8TrapNo, enmType, uErrCode, uCr2);
9924#endif
9925
9926 uint32_t fFlags;
9927 switch (enmType)
9928 {
9929 case TRPM_HARDWARE_INT:
9930 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9931 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9932 uErrCode = uCr2 = 0;
9933 break;
9934
9935 case TRPM_SOFTWARE_INT:
9936 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9937 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9938 uErrCode = uCr2 = 0;
9939 break;
9940
9941 case TRPM_TRAP:
9942 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
9943 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9944 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9945 if (u8TrapNo == X86_XCPT_PF)
9946 fFlags |= IEM_XCPT_FLAGS_CR2;
9947 switch (u8TrapNo)
9948 {
9949 case X86_XCPT_DF:
9950 case X86_XCPT_TS:
9951 case X86_XCPT_NP:
9952 case X86_XCPT_SS:
9953 case X86_XCPT_PF:
9954 case X86_XCPT_AC:
9955 case X86_XCPT_GP:
9956 fFlags |= IEM_XCPT_FLAGS_ERR;
9957 break;
9958 }
9959 break;
9960
9961 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9962 }
9963
9964 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9965
9966 if (pVCpu->iem.s.cActiveMappings > 0)
9967 iemMemRollback(pVCpu);
9968
9969 return rcStrict;
9970}
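
/*
 * Usage sketch (illustrative only; vector 0x20 is a hypothetical external
 * interrupt): hardware interrupts carry neither an error code nor a CR2 value,
 * and cbInstr only matters for software interrupts, so zeros are passed:
 *
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT, 0, 0, 0);
 */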
9971
9972
9973/**
9974 * Injects the active TRPM event.
9975 *
9976 * @returns Strict VBox status code.
9977 * @param pVCpu The cross context virtual CPU structure.
9978 */
9979VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9980{
9981#ifndef IEM_IMPLEMENTS_TASKSWITCH
9982 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9983#else
9984 uint8_t u8TrapNo;
9985 TRPMEVENT enmType;
9986 uint32_t uErrCode;
9987 RTGCUINTPTR uCr2;
9988 uint8_t cbInstr;
9989 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9990 if (RT_FAILURE(rc))
9991 return rc;
9992
9993 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9994 * ICEBP \#DB injection as a special case. */
9995 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9996#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9997 if (rcStrict == VINF_SVM_VMEXIT)
9998 rcStrict = VINF_SUCCESS;
9999#endif
10000#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10001 if (rcStrict == VINF_VMX_VMEXIT)
10002 rcStrict = VINF_SUCCESS;
10003#endif
10004 /** @todo Are there any other codes that imply the event was successfully
10005 * delivered to the guest? See @bugref{6607}. */
10006 if ( rcStrict == VINF_SUCCESS
10007 || rcStrict == VINF_IEM_RAISED_XCPT)
10008 TRPMResetTrap(pVCpu);
10009
10010 return rcStrict;
10011#endif
10012}
10013
10014
10015VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10016{
10017 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10018 return VERR_NOT_IMPLEMENTED;
10019}
10020
10021
10022VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10023{
10024 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10025 return VERR_NOT_IMPLEMENTED;
10026}
10027
10028
10029/**
10030 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10031 *
10032 * This API ASSUMES that the caller has already verified that the guest code is
10033 * allowed to access the I/O port. (The I/O port is in the DX register in the
10034 * guest state.)
10035 *
10036 * @returns Strict VBox status code.
10037 * @param pVCpu The cross context virtual CPU structure.
10038 * @param cbValue The size of the I/O port access (1, 2, or 4).
10039 * @param enmAddrMode The addressing mode.
10040 * @param fRepPrefix Indicates whether a repeat prefix is used
10041 * (doesn't matter which for this instruction).
10042 * @param cbInstr The instruction length in bytes.
10043 * @param iEffSeg The effective segment register number (X86_SREG_XXX).
10044 * @param fIoChecked Whether the access to the I/O port has been
10045 * checked or not. It's typically checked in the
10046 * HM scenario.
10047 */
10048VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10049 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10050{
10051 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10052 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10053
10054 /*
10055 * State init.
10056 */
10057 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10058
10059 /*
10060 * Switch orgy for getting to the right handler.
10061 */
10062 VBOXSTRICTRC rcStrict;
10063 if (fRepPrefix)
10064 {
10065 switch (enmAddrMode)
10066 {
10067 case IEMMODE_16BIT:
10068 switch (cbValue)
10069 {
10070 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10071 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10072 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10073 default:
10074 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10075 }
10076 break;
10077
10078 case IEMMODE_32BIT:
10079 switch (cbValue)
10080 {
10081 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10082 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10083 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10084 default:
10085 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10086 }
10087 break;
10088
10089 case IEMMODE_64BIT:
10090 switch (cbValue)
10091 {
10092 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10093 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10094 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10095 default:
10096 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10097 }
10098 break;
10099
10100 default:
10101 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10102 }
10103 }
10104 else
10105 {
10106 switch (enmAddrMode)
10107 {
10108 case IEMMODE_16BIT:
10109 switch (cbValue)
10110 {
10111 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10112 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10113 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10114 default:
10115 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10116 }
10117 break;
10118
10119 case IEMMODE_32BIT:
10120 switch (cbValue)
10121 {
10122 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10123 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10124 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10125 default:
10126 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10127 }
10128 break;
10129
10130 case IEMMODE_64BIT:
10131 switch (cbValue)
10132 {
10133 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10134 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10135 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10136 default:
10137 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10138 }
10139 break;
10140
10141 default:
10142 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10143 }
10144 }
10145
10146 if (pVCpu->iem.s.cActiveMappings)
10147 iemMemRollback(pVCpu);
10148
10149 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10150}
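
/*
 * Usage sketch (illustrative only; the values are hypothetical): an HM exit
 * handler that has already validated the I/O port access could emulate a
 * "rep outsb" with 32-bit addressing and the default DS segment like this:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT, true, cbInstr,
 *                                                   X86_SREG_DS, true);
 *      // cbValue=1, fRepPrefix=true, fIoChecked=true; cbInstr comes from the exit info.
 */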
10151
10152
10153/**
10154 * Interface for HM and EM for executing string I/O IN (read) instructions.
10155 *
10156 * This API ASSUMES that the caller has already verified that the guest code is
10157 * allowed to access the I/O port. (The I/O port is in the DX register in the
10158 * guest state.)
10159 *
10160 * @returns Strict VBox status code.
10161 * @param pVCpu The cross context virtual CPU structure.
10162 * @param cbValue The size of the I/O port access (1, 2, or 4).
10163 * @param enmAddrMode The addressing mode.
10164 * @param fRepPrefix Indicates whether a repeat prefix is used
10165 * (doesn't matter which for this instruction).
10166 * @param cbInstr The instruction length in bytes.
10167 * @param fIoChecked Whether the access to the I/O port has been
10168 * checked or not. It's typically checked in the
10169 * HM scenario.
10170 */
10171VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10172 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10173{
10174 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10175
10176 /*
10177 * State init.
10178 */
10179 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10180
10181 /*
10182 * Switch orgy for getting to the right handler.
10183 */
10184 VBOXSTRICTRC rcStrict;
10185 if (fRepPrefix)
10186 {
10187 switch (enmAddrMode)
10188 {
10189 case IEMMODE_16BIT:
10190 switch (cbValue)
10191 {
10192 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10193 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10194 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10195 default:
10196 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10197 }
10198 break;
10199
10200 case IEMMODE_32BIT:
10201 switch (cbValue)
10202 {
10203 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10204 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10205 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10206 default:
10207 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10208 }
10209 break;
10210
10211 case IEMMODE_64BIT:
10212 switch (cbValue)
10213 {
10214 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10215 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10216 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10217 default:
10218 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10219 }
10220 break;
10221
10222 default:
10223 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10224 }
10225 }
10226 else
10227 {
10228 switch (enmAddrMode)
10229 {
10230 case IEMMODE_16BIT:
10231 switch (cbValue)
10232 {
10233 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10234 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10235 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10236 default:
10237 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10238 }
10239 break;
10240
10241 case IEMMODE_32BIT:
10242 switch (cbValue)
10243 {
10244 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10245 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10246 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10247 default:
10248 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10249 }
10250 break;
10251
10252 case IEMMODE_64BIT:
10253 switch (cbValue)
10254 {
10255 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10256 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10257 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10258 default:
10259 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10260 }
10261 break;
10262
10263 default:
10264 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10265 }
10266 }
10267
10268 if ( pVCpu->iem.s.cActiveMappings == 0
10269 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10270 { /* likely */ }
10271 else
10272 {
10273 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10274 iemMemRollback(pVCpu);
10275 }
10276 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10277}
10278
10279
10280/**
10281 * Interface for rawmode to execute an OUT (port write) instruction.
10282 *
10283 * @returns Strict VBox status code.
10284 * @param pVCpu The cross context virtual CPU structure.
10285 * @param cbInstr The instruction length in bytes.
10286 * @param u16Port The port to write to.
10287 * @param fImm Whether the port is specified using an immediate operand or
10288 * using the implicit DX register.
10289 * @param cbReg The register size.
10290 *
10291 * @remarks In ring-0 not all of the state needs to be synced in.
10292 */
10293VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10294{
10295 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10296 Assert(cbReg <= 4 && cbReg != 3);
10297
10298 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10299 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10300 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10301 Assert(!pVCpu->iem.s.cActiveMappings);
10302 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10303}
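
/*
 * Usage sketch (illustrative only): emulating a one byte "out dx, al" (opcode
 * 0xEE, port taken from DX rather than an immediate) might look like this:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1, pVCpu->cpum.GstCtx.dx, false, 1);
 *      // cbInstr=1, fImm=false (implicit DX), cbReg=1
 */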
10304
10305
10306/**
10307 * Interface for rawmode to execute an IN (port read) instruction.
10308 *
10309 * @returns Strict VBox status code.
10310 * @param pVCpu The cross context virtual CPU structure.
10311 * @param cbInstr The instruction length in bytes.
10312 * @param u16Port The port to read.
10313 * @param fImm Whether the port is specified using an immediate operand or
10314 * using the implicit DX register.
10315 * @param cbReg The register size.
10316 */
10317VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10318{
10319 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10320 Assert(cbReg <= 4 && cbReg != 3);
10321
10322 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10323 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10324 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10325 Assert(!pVCpu->iem.s.cActiveMappings);
10326 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10327}
10328
10329
10330/**
10331 * Interface for HM and EM to write to a CRx register.
10332 *
10333 * @returns Strict VBox status code.
10334 * @param pVCpu The cross context virtual CPU structure.
10335 * @param cbInstr The instruction length in bytes.
10336 * @param iCrReg The control register number (destination).
10337 * @param iGReg The general purpose register number (source).
10338 *
10339 * @remarks In ring-0 not all of the state needs to be synced in.
10340 */
10341VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10342{
10343 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10344 Assert(iCrReg < 16);
10345 Assert(iGReg < 16);
10346
10347 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10348 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10349 Assert(!pVCpu->iem.s.cActiveMappings);
10350 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10351}
10352
10353
10354/**
10355 * Interface for HM and EM to read from a CRx register.
10356 *
10357 * @returns Strict VBox status code.
10358 * @param pVCpu The cross context virtual CPU structure.
10359 * @param cbInstr The instruction length in bytes.
10360 * @param iGReg The general purpose register number (destination).
10361 * @param iCrReg The control register number (source).
10362 *
10363 * @remarks In ring-0 not all of the state needs to be synced in.
10364 */
10365VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10366{
10367 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10368 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10369 | CPUMCTX_EXTRN_APIC_TPR);
10370 Assert(iCrReg < 16);
10371 Assert(iGReg < 16);
10372
10373 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10374 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10375 Assert(!pVCpu->iem.s.cActiveMappings);
10376 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10377}
10378
10379
10380/**
10381 * Interface for HM and EM to write to a DRx register.
10382 *
10383 * @returns Strict VBox status code.
10384 * @param pVCpu The cross context virtual CPU structure.
10385 * @param cbInstr The instruction length in bytes.
10386 * @param iDrReg The debug register number (destination).
10387 * @param iGReg The general purpose register number (source).
10388 *
10389 * @remarks In ring-0 not all of the state needs to be synced in.
10390 */
10391VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10392{
10393 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10394 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10395 Assert(iDrReg < 8);
10396 Assert(iGReg < 16);
10397
10398 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10399 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10400 Assert(!pVCpu->iem.s.cActiveMappings);
10401 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10402}
10403
10404
10405/**
10406 * Interface for HM and EM to read from a DRx register.
10407 *
10408 * @returns Strict VBox status code.
10409 * @param pVCpu The cross context virtual CPU structure.
10410 * @param cbInstr The instruction length in bytes.
10411 * @param iGReg The general purpose register number (destination).
10412 * @param iDrReg The debug register number (source).
10413 *
10414 * @remarks In ring-0 not all of the state needs to be synced in.
10415 */
10416VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10417{
10418 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10419 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10420 Assert(iDrReg < 8);
10421 Assert(iGReg < 16);
10422
10423 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10424 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10425 Assert(!pVCpu->iem.s.cActiveMappings);
10426 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10427}
10428
10429
10430/**
10431 * Interface for HM and EM to clear the CR0[TS] bit.
10432 *
10433 * @returns Strict VBox status code.
10434 * @param pVCpu The cross context virtual CPU structure.
10435 * @param cbInstr The instruction length in bytes.
10436 *
10437 * @remarks In ring-0 not all of the state needs to be synced in.
10438 */
10439VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10440{
10441 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10442
10443 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10444 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10445 Assert(!pVCpu->iem.s.cActiveMappings);
10446 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10447}
10448
10449
10450/**
10451 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10452 *
10453 * @returns Strict VBox status code.
10454 * @param pVCpu The cross context virtual CPU structure.
10455 * @param cbInstr The instruction length in bytes.
10456 * @param uValue The value to load into CR0.
10457 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10458 * memory operand. Otherwise pass NIL_RTGCPTR.
10459 *
10460 * @remarks In ring-0 not all of the state needs to be synced in.
10461 */
10462VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10463{
10464 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10465
10466 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10467 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10468 Assert(!pVCpu->iem.s.cActiveMappings);
10469 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10470}
10471
10472
10473/**
10474 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10475 *
10476 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10477 *
10478 * @returns Strict VBox status code.
10479 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10480 * @param cbInstr The instruction length in bytes.
10481 * @remarks In ring-0 not all of the state needs to be synced in.
10482 * @thread EMT(pVCpu)
10483 */
10484VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10485{
10486 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10487
10488 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10489 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10490 Assert(!pVCpu->iem.s.cActiveMappings);
10491 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10492}
10493
10494
10495/**
10496 * Interface for HM and EM to emulate the WBINVD instruction.
10497 *
10498 * @returns Strict VBox status code.
10499 * @param pVCpu The cross context virtual CPU structure.
10500 * @param cbInstr The instruction length in bytes.
10501 *
10502 * @remarks In ring-0 not all of the state needs to be synced in.
10503 */
10504VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10505{
10506 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10507
10508 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10509 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10510 Assert(!pVCpu->iem.s.cActiveMappings);
10511 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10512}
10513
10514
10515/**
10516 * Interface for HM and EM to emulate the INVD instruction.
10517 *
10518 * @returns Strict VBox status code.
10519 * @param pVCpu The cross context virtual CPU structure.
10520 * @param cbInstr The instruction length in bytes.
10521 *
10522 * @remarks In ring-0 not all of the state needs to be synced in.
10523 */
10524VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10525{
10526 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10527
10528 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10529 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10530 Assert(!pVCpu->iem.s.cActiveMappings);
10531 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10532}
10533
10534
10535/**
10536 * Interface for HM and EM to emulate the INVLPG instruction.
10537 *
10538 * @returns Strict VBox status code.
10539 * @retval VINF_PGM_SYNC_CR3
10540 *
10541 * @param pVCpu The cross context virtual CPU structure.
10542 * @param cbInstr The instruction length in bytes.
10543 * @param GCPtrPage The effective address of the page to invalidate.
10544 *
10545 * @remarks In ring-0 not all of the state needs to be synced in.
10546 */
10547VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10548{
10549 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10550
10551 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10552 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10553 Assert(!pVCpu->iem.s.cActiveMappings);
10554 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10555}
10556
10557
10558/**
10559 * Interface for HM and EM to emulate the INVPCID instruction.
10560 *
10561 * @returns Strict VBox status code.
10562 * @retval VINF_PGM_SYNC_CR3
10563 *
10564 * @param pVCpu The cross context virtual CPU structure.
10565 * @param cbInstr The instruction length in bytes.
10566 * @param iEffSeg The effective segment register.
10567 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10568 * @param uType The invalidation type.
10569 *
10570 * @remarks In ring-0 not all of the state needs to be synced in.
10571 */
10572VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10573 uint64_t uType)
10574{
10575 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10576
10577 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10578 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10579 Assert(!pVCpu->iem.s.cActiveMappings);
10580 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10581}
10582
10583
10584/**
10585 * Interface for HM and EM to emulate the CPUID instruction.
10586 *
10587 * @returns Strict VBox status code.
10588 *
10589 * @param pVCpu The cross context virtual CPU structure.
10590 * @param cbInstr The instruction length in bytes.
10591 *
10592 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
10593 */
10594VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10595{
10596 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10597 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10598
10599 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10600 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10601 Assert(!pVCpu->iem.s.cActiveMappings);
10602 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10603}
10604
10605
10606/**
10607 * Interface for HM and EM to emulate the RDPMC instruction.
10608 *
10609 * @returns Strict VBox status code.
10610 *
10611 * @param pVCpu The cross context virtual CPU structure.
10612 * @param cbInstr The instruction length in bytes.
10613 *
10614 * @remarks Not all of the state needs to be synced in.
10615 */
10616VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10617{
10618 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10619 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10620
10621 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10622 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10623 Assert(!pVCpu->iem.s.cActiveMappings);
10624 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10625}
10626
10627
10628/**
10629 * Interface for HM and EM to emulate the RDTSC instruction.
10630 *
10631 * @returns Strict VBox status code.
10632 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10633 *
10634 * @param pVCpu The cross context virtual CPU structure.
10635 * @param cbInstr The instruction length in bytes.
10636 *
10637 * @remarks Not all of the state needs to be synced in.
10638 */
10639VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10640{
10641 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10642 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10643
10644 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10645 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10646 Assert(!pVCpu->iem.s.cActiveMappings);
10647 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10648}
10649
10650
10651/**
10652 * Interface for HM and EM to emulate the RDTSCP instruction.
10653 *
10654 * @returns Strict VBox status code.
10655 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10656 *
10657 * @param pVCpu The cross context virtual CPU structure.
10658 * @param cbInstr The instruction length in bytes.
10659 *
10660 * @remarks Not all of the state needs to be synced in. It is recommended
10661 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10662 */
10663VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10664{
10665 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10666 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10667
10668 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10669 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10670 Assert(!pVCpu->iem.s.cActiveMappings);
10671 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10672}
10673
10674
10675/**
10676 * Interface for HM and EM to emulate the RDMSR instruction.
10677 *
10678 * @returns Strict VBox status code.
10679 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10680 *
10681 * @param pVCpu The cross context virtual CPU structure.
10682 * @param cbInstr The instruction length in bytes.
10683 *
10684 * @remarks Not all of the state needs to be synced in. Requires RCX and
10685 * (currently) all MSRs.
10686 */
10687VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10688{
10689 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10690 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10691
10692 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10693 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10694 Assert(!pVCpu->iem.s.cActiveMappings);
10695 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10696}
10697
10698
10699/**
10700 * Interface for HM and EM to emulate the WRMSR instruction.
10701 *
10702 * @returns Strict VBox status code.
10703 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10704 *
10705 * @param pVCpu The cross context virtual CPU structure.
10706 * @param cbInstr The instruction length in bytes.
10707 *
10708 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10709 * and (currently) all MSRs.
10710 */
10711VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10712{
10713 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10714 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10715 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10716
10717 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10718 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10719 Assert(!pVCpu->iem.s.cActiveMappings);
10720 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10721}
10722
10723
10724/**
10725 * Interface for HM and EM to emulate the MONITOR instruction.
10726 *
10727 * @returns Strict VBox status code.
10728 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10729 *
10730 * @param pVCpu The cross context virtual CPU structure.
10731 * @param cbInstr The instruction length in bytes.
10732 *
10733 * @remarks Not all of the state needs to be synced in.
10734 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10735 * are used.
10736 */
10737VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10738{
10739 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10740 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10741
10742 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10743 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10744 Assert(!pVCpu->iem.s.cActiveMappings);
10745 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10746}
10747
10748
10749/**
10750 * Interface for HM and EM to emulate the MWAIT instruction.
10751 *
10752 * @returns Strict VBox status code.
10753 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10754 *
10755 * @param pVCpu The cross context virtual CPU structure.
10756 * @param cbInstr The instruction length in bytes.
10757 *
10758 * @remarks Not all of the state needs to be synced in.
10759 */
10760VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10761{
10762 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10763 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10764
10765 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10766 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10767 Assert(!pVCpu->iem.s.cActiveMappings);
10768 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10769}
10770
10771
10772/**
10773 * Interface for HM and EM to emulate the HLT instruction.
10774 *
10775 * @returns Strict VBox status code.
10776 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10777 *
10778 * @param pVCpu The cross context virtual CPU structure.
10779 * @param cbInstr The instruction length in bytes.
10780 *
10781 * @remarks Not all of the state needs to be synced in.
10782 */
10783VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10784{
10785 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10786
10787 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10788 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10789 Assert(!pVCpu->iem.s.cActiveMappings);
10790 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10791}
10792
10793
10794/**
10795 * Checks if IEM is in the process of delivering an event (interrupt or
10796 * exception).
10797 *
10798 * @returns true if we're in the process of raising an interrupt or exception,
10799 * false otherwise.
10800 * @param pVCpu The cross context virtual CPU structure.
10801 * @param puVector Where to store the vector associated with the
10802 * currently delivered event, optional.
10803 * @param pfFlags Where to store the event delivery flags (see
10804 * IEM_XCPT_FLAGS_XXX), optional.
10805 * @param puErr Where to store the error code associated with the
10806 * event, optional.
10807 * @param puCr2 Where to store the CR2 associated with the event,
10808 * optional.
10809 * @remarks The caller should check the flags to determine if the error code and
10810 * CR2 are valid for the event.
10811 */
10812VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10813{
10814 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10815 if (fRaisingXcpt)
10816 {
10817 if (puVector)
10818 *puVector = pVCpu->iem.s.uCurXcpt;
10819 if (pfFlags)
10820 *pfFlags = pVCpu->iem.s.fCurXcpt;
10821 if (puErr)
10822 *puErr = pVCpu->iem.s.uCurXcptErr;
10823 if (puCr2)
10824 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10825 }
10826 return fRaisingXcpt;
10827}
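
#if 0 /* Illustrative sketch only -- not part of IEMAll.cpp. */
/*
 * A minimal example of consuming IEMGetCurrentXcpt() as suggested by the
 * remark above: the returned flags tell the caller whether the error code
 * and CR2 values are meaningful.  The IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2
 * bit names are assumed from the IEM_XCPT_FLAGS_XXX set referenced in the
 * documentation; the logging itself is purely illustrative.
 */
static void exampleLogPendingXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("Delivering vector %#x fFlags=%#x\n", uVector, fFlags));
        if (fFlags & IEM_XCPT_FLAGS_ERR)    /* error code only valid when flagged */
            Log(("  uErr=%#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)    /* CR2 only valid for page faults */
            Log(("  uCr2=%RX64\n", uCr2));
    }
}
#endif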
10828
10829#ifdef IN_RING3
10830
10831/**
10832 * Handles the unlikely and probably fatal merge cases.
10833 *
10834 * @returns Merged status code.
10835 * @param rcStrict Current EM status code.
10836 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10837 * with @a rcStrict.
10838 * @param iMemMap The memory mapping index. For error reporting only.
10839 * @param pVCpu The cross context virtual CPU structure of the calling
10840 * thread, for error reporting only.
10841 */
10842DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10843 unsigned iMemMap, PVMCPUCC pVCpu)
10844{
10845 if (RT_FAILURE_NP(rcStrict))
10846 return rcStrict;
10847
10848 if (RT_FAILURE_NP(rcStrictCommit))
10849 return rcStrictCommit;
10850
10851 if (rcStrict == rcStrictCommit)
10852 return rcStrictCommit;
10853
10854 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10855 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10856 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10857 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10858 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10859 return VERR_IOM_FF_STATUS_IPE;
10860}
10861
10862
10863/**
10864 * Helper for IOMR3ProcessForceFlag.
10865 *
10866 * @returns Merged status code.
10867 * @param rcStrict Current EM status code.
10868 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10869 * with @a rcStrict.
10870 * @param iMemMap The memory mapping index. For error reporting only.
10871 * @param pVCpu The cross context virtual CPU structure of the calling
10872 * thread, for error reporting only.
10873 */
10874DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10875{
10876 /* Simple. */
10877 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10878 return rcStrictCommit;
10879
10880 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10881 return rcStrict;
10882
10883 /* EM scheduling status codes: the lower the value, the higher the priority, so pick the smaller one. */
10884 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10885 && rcStrict <= VINF_EM_LAST))
10886 {
10887 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10888 && rcStrictCommit <= VINF_EM_LAST))
10889 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10890 }
10891
10892 /* Unlikely */
10893 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10894}
10895
10896
10897/**
10898 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10899 *
10900 * @returns Merge between @a rcStrict and what the commit operation returned.
10901 * @param pVM The cross context VM structure.
10902 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10903 * @param rcStrict The status code returned by ring-0 or raw-mode.
10904 */
10905VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10906{
10907 /*
10908 * Reset the pending commit.
10909 */
10910 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10911 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10912 ("%#x %#x %#x\n",
10913 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10914 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10915
10916 /*
10917 * Commit the pending bounce buffers (usually just one).
10918 */
10919 unsigned cBufs = 0;
10920 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10921 while (iMemMap-- > 0)
10922 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10923 {
10924 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10925 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10926 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10927
10928 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10929 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10930 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10931
10932 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10933 {
10934 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10935 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10936 pbBuf,
10937 cbFirst,
10938 PGMACCESSORIGIN_IEM);
10939 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10940 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10941 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10942 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10943 }
10944
10945 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10946 {
10947 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10948 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10949 pbBuf + cbFirst,
10950 cbSecond,
10951 PGMACCESSORIGIN_IEM);
10952 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10953 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10954 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10955 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10956 }
10957 cBufs++;
10958 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10959 }
10960
10961 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10962 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10963 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10964 pVCpu->iem.s.cActiveMappings = 0;
10965 return rcStrict;
10966}
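
#if 0 /* Illustrative sketch only -- not part of IEMAll.cpp. */
/*
 * A minimal sketch of how ring-3 force-flag processing might invoke the
 * interface above after execution returns from ring-0 or raw-mode.  The
 * surrounding function is hypothetical; VMCPU_FF_IS_SET() and VMCPU_FF_IEM
 * are the real force-flag check corresponding to the flag cleared by
 * IEMR3ProcessForceFlag() itself.
 */
static VBOXSTRICTRC emExampleProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Only call when IEM has left pending bounce-buffer commits behind. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif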
10967
10968#endif /* IN_RING3 */
10969