VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 100050

Last change on this file since 100050 was 99996, checked in by vboxsync, 22 months ago

VMM/IEM: Stripped down iemReInitExec to what it's supposed to be doing.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 483.0 KB
1/* $Id: IEMAll.cpp 99996 2023-05-27 00:24:43Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 */
91
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <iprt/asm-math.h>
130#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
131# include <iprt/asm-amd64-x86.h>
132#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
133# include <iprt/asm-arm.h>
134#endif
135#include <iprt/assert.h>
136#include <iprt/string.h>
137#include <iprt/x86.h>
138
139#include "IEMInline.h"
140
141
142/*********************************************************************************************************************************
143* Structures and Typedefs *
144*********************************************************************************************************************************/
145/**
146 * CPU exception classes.
147 */
148typedef enum IEMXCPTCLASS
149{
150 IEMXCPTCLASS_BENIGN,
151 IEMXCPTCLASS_CONTRIBUTORY,
152 IEMXCPTCLASS_PAGE_FAULT,
153 IEMXCPTCLASS_DOUBLE_FAULT
154} IEMXCPTCLASS;
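/* Note: these classes follow the usual x86 benign/contributory/page-fault grouping;
   iemGetXcptClass() further down maps exception vectors onto them so the exception
   delivery code can decide when a second exception escalates to a double fault. */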
155
156
157/*********************************************************************************************************************************
158* Global Variables *
159*********************************************************************************************************************************/
160#if defined(IEM_LOG_MEMORY_WRITES)
161/** What IEM just wrote. */
162uint8_t g_abIemWrote[256];
163/** How much IEM just wrote. */
164size_t g_cbIemWrote;
165#endif
166
167
168/*********************************************************************************************************************************
169* Internal Functions *
170*********************************************************************************************************************************/
171static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
172 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
173
174
175/**
176 * Slow path of iemInitDecoder() and iemInitExec() that checks what kind of
177 * breakpoints are enabled.
178 *
179 * @param pVCpu The cross context virtual CPU structure of the
180 * calling thread.
181 */
182void iemInitPendingBreakpointsSlow(PVMCPUCC pVCpu)
183{
184 /*
185 * Process guest breakpoints.
186 */
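 /* The macro below checks the DR7 L/G enable bits for breakpoint a_iBp and, when
    enabled, uses the R/W field to record whether an instruction (execute-only),
    data (write-only / read-write) or I/O breakpoint is armed by setting the
    corresponding fPending*Breakpoints flag. */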
187#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
188 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
189 { \
190 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
191 { \
192 case X86_DR7_RW_EO: \
193 pVCpu->iem.s.fPendingInstructionBreakpoints = true; \
194 break; \
195 case X86_DR7_RW_WO: \
196 case X86_DR7_RW_RW: \
197 pVCpu->iem.s.fPendingDataBreakpoints = true; \
198 break; \
199 case X86_DR7_RW_IO: \
200 pVCpu->iem.s.fPendingIoBreakpoints = true; \
201 break; \
202 } \
203 } \
204 } while (0)
205 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
206 if (fGstDr7 & X86_DR7_ENABLED_MASK)
207 {
208 PROCESS_ONE_BP(fGstDr7, 0);
209 PROCESS_ONE_BP(fGstDr7, 1);
210 PROCESS_ONE_BP(fGstDr7, 2);
211 PROCESS_ONE_BP(fGstDr7, 3);
212 }
213
214 /*
215 * Process hypervisor breakpoints.
216 */
217 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
218 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
219 {
220 PROCESS_ONE_BP(fHyperDr7, 0);
221 PROCESS_ONE_BP(fHyperDr7, 1);
222 PROCESS_ONE_BP(fHyperDr7, 2);
223 PROCESS_ONE_BP(fHyperDr7, 3);
224 }
225}
226
227
228/**
229 * Initializes the decoder state.
230 *
231 * iemReInitDecoder is mostly a copy of this function.
232 *
233 * @param pVCpu The cross context virtual CPU structure of the
234 * calling thread.
235 * @param fBypassHandlers Whether to bypass access handlers.
236 * @param fDisregardLock Whether to disregard the LOCK prefix.
237 */
238DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
239{
240 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
241 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
242 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
243 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
244 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
245 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
246 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
247 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
248 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
249 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
250
251 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
252 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
253 pVCpu->iem.s.enmCpuMode = enmMode;
254 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
255 pVCpu->iem.s.enmEffAddrMode = enmMode;
256 if (enmMode != IEMMODE_64BIT)
257 {
258 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
259 pVCpu->iem.s.enmEffOpSize = enmMode;
260 }
261 else
262 {
263 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
264 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
265 }
266 pVCpu->iem.s.fPrefixes = 0;
267 pVCpu->iem.s.uRexReg = 0;
268 pVCpu->iem.s.uRexB = 0;
269 pVCpu->iem.s.uRexIndex = 0;
270 pVCpu->iem.s.idxPrefix = 0;
271 pVCpu->iem.s.uVex3rdReg = 0;
272 pVCpu->iem.s.uVexLength = 0;
273 pVCpu->iem.s.fEvexStuff = 0;
274 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
275#ifdef IEM_WITH_CODE_TLB
276 pVCpu->iem.s.pbInstrBuf = NULL;
277 pVCpu->iem.s.offInstrNextByte = 0;
278 pVCpu->iem.s.offCurInstrStart = 0;
279# ifdef VBOX_STRICT
280 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
281 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
282 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
283 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
284# endif
285#else
286 pVCpu->iem.s.offOpcode = 0;
287 pVCpu->iem.s.cbOpcode = 0;
288#endif
289 pVCpu->iem.s.offModRm = 0;
290 pVCpu->iem.s.cActiveMappings = 0;
291 pVCpu->iem.s.iNextMapping = 0;
292 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
293 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
294 pVCpu->iem.s.fDisregardLock = fDisregardLock;
295 pVCpu->iem.s.fPendingInstructionBreakpoints = false;
296 pVCpu->iem.s.fPendingDataBreakpoints = false;
297 pVCpu->iem.s.fPendingIoBreakpoints = false;
298 if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
299 && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
300 { /* likely */ }
301 else
302 iemInitPendingBreakpointsSlow(pVCpu);
303
304#ifdef DBGFTRACE_ENABLED
305 switch (enmMode)
306 {
307 case IEMMODE_64BIT:
308 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
309 break;
310 case IEMMODE_32BIT:
311 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
312 break;
313 case IEMMODE_16BIT:
314 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
315 break;
316 }
317#endif
318}
319
320
321/**
322 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
323 *
324 * This is mostly a copy of iemInitDecoder.
325 *
326 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
327 */
328DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
329{
330 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
331 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
332 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
333 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
334 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
335 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
336 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
337 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
338 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
339
340 Assert(pVCpu->iem.s.uCpl == CPUMGetGuestCPL(pVCpu)); /* ASSUMES: Anyone changing CPL will adjust iem.s.uCpl. */
341 IEMMODE const enmMode = pVCpu->iem.s.enmCpuMode;
342 Assert(enmMode == iemCalcCpuMode(pVCpu)); /* ASSUMES: Anyone changing the CPU mode will adjust iem.s.enmCpuMode. */
343 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
344 pVCpu->iem.s.enmEffAddrMode = enmMode;
345 if (enmMode != IEMMODE_64BIT)
346 {
347 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
348 pVCpu->iem.s.enmEffOpSize = enmMode;
349 }
350 else
351 {
352 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
353 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
354 }
355 pVCpu->iem.s.fPrefixes = 0;
356 pVCpu->iem.s.uRexReg = 0;
357 pVCpu->iem.s.uRexB = 0;
358 pVCpu->iem.s.uRexIndex = 0;
359 pVCpu->iem.s.idxPrefix = 0;
360 pVCpu->iem.s.uVex3rdReg = 0;
361 pVCpu->iem.s.uVexLength = 0;
362 pVCpu->iem.s.fEvexStuff = 0;
363 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
364#ifdef IEM_WITH_CODE_TLB
365 if (pVCpu->iem.s.pbInstrBuf)
366 {
367 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
368 ? pVCpu->cpum.GstCtx.rip
369 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
370 - pVCpu->iem.s.uInstrBufPc;
371 if (off < pVCpu->iem.s.cbInstrBufTotal)
372 {
373 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
374 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
375 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
376 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
377 else
378 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
379 }
380 else
381 {
382 pVCpu->iem.s.pbInstrBuf = NULL;
383 pVCpu->iem.s.offInstrNextByte = 0;
384 pVCpu->iem.s.offCurInstrStart = 0;
385 pVCpu->iem.s.cbInstrBuf = 0;
386 pVCpu->iem.s.cbInstrBufTotal = 0;
387 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
388 }
389 }
390 else
391 {
392 pVCpu->iem.s.offInstrNextByte = 0;
393 pVCpu->iem.s.offCurInstrStart = 0;
394 pVCpu->iem.s.cbInstrBuf = 0;
395 pVCpu->iem.s.cbInstrBufTotal = 0;
396# ifdef VBOX_STRICT
397 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
398# endif
399 }
400#else
401 pVCpu->iem.s.cbOpcode = 0;
402 pVCpu->iem.s.offOpcode = 0;
403#endif
404 pVCpu->iem.s.offModRm = 0;
405 Assert(pVCpu->iem.s.cActiveMappings == 0);
406 pVCpu->iem.s.iNextMapping = 0;
407 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
408 Assert(pVCpu->iem.s.fBypassHandlers == false);
409
410#ifdef DBGFTRACE_ENABLED
411 switch (enmMode)
412 {
413 case IEMMODE_64BIT:
414 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
415 break;
416 case IEMMODE_32BIT:
417 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
418 break;
419 case IEMMODE_16BIT:
420 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
421 break;
422 }
423#endif
424}
425
426
427
428/**
429 * Prefetches opcodes the first time execution is started.
430 *
431 * @returns Strict VBox status code.
432 * @param pVCpu The cross context virtual CPU structure of the
433 * calling thread.
434 * @param fBypassHandlers Whether to bypass access handlers.
435 * @param fDisregardLock Whether to disregard LOCK prefixes.
436 *
437 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
438 * store them as such.
439 */
440static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
441{
442 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
443
444#ifndef IEM_WITH_CODE_TLB
445 /*
446 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
447 *
448 * First translate CS:rIP to a physical address.
449 *
450 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
451 * all relevant bytes from the first page, as it ASSUMES it's only ever
452 * called for dealing with CS.LIM, page crossing and instructions that
453 * are too long.
454 */
455 uint32_t cbToTryRead;
456 RTGCPTR GCPtrPC;
457 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
458 {
459 cbToTryRead = GUEST_PAGE_SIZE;
460 GCPtrPC = pVCpu->cpum.GstCtx.rip;
461 if (IEM_IS_CANONICAL(GCPtrPC))
462 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
463 else
464 return iemRaiseGeneralProtectionFault0(pVCpu);
465 }
466 else
467 {
468 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
469 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
470 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
471 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
472 else
473 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
474 if (cbToTryRead) { /* likely */ }
475 else /* overflowed */
476 {
477 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
478 cbToTryRead = UINT32_MAX;
479 }
480 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
481 Assert(GCPtrPC <= UINT32_MAX);
482 }
483
484 PGMPTWALK Walk;
485 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
486 if (RT_SUCCESS(rc))
487 Assert(Walk.fSucceeded); /* probable. */
488 else
489 {
490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
491# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
492 if (Walk.fFailed & PGM_WALKFAIL_EPT)
493 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
494# endif
495 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
496 }
497 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
498 else
499 {
500 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
501# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
502 if (Walk.fFailed & PGM_WALKFAIL_EPT)
503 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
504# endif
505 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
506 }
507 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
508 else
509 {
510 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
511# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
512 if (Walk.fFailed & PGM_WALKFAIL_EPT)
513 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
514# endif
515 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
516 }
517 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
518 /** @todo Check reserved bits and such stuff. PGM is better at doing
519 * that, so do it when implementing the guest virtual address
520 * TLB... */
521
522 /*
523 * Read the bytes at this address.
524 */
525 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
526 if (cbToTryRead > cbLeftOnPage)
527 cbToTryRead = cbLeftOnPage;
528 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
529 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
530
531 if (!pVCpu->iem.s.fBypassHandlers)
532 {
533 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
534 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
535 { /* likely */ }
536 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
539 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
540 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
541 }
542 else
543 {
544 Log((RT_SUCCESS(rcStrict)
545 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
546 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
547 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
548 return rcStrict;
549 }
550 }
551 else
552 {
553 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
554 if (RT_SUCCESS(rc))
555 { /* likely */ }
556 else
557 {
558 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
559 GCPtrPC, GCPhys, cbToTryRead, rc));
560 return rc;
561 }
562 }
563 pVCpu->iem.s.cbOpcode = cbToTryRead;
564#endif /* !IEM_WITH_CODE_TLB */
565 return VINF_SUCCESS;
566}
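/*
 * Illustrative sketch only - not part of the original file: the IEMExecOne /
 * IEMExecLots style entry points later in this file are expected to drive the
 * function above roughly like this (the worker name used here is hypothetical):
 *
 *     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = iemDecodeAndExecuteCurrentInstruction(pVCpu); // hypothetical worker
 *     return rcStrict;
 */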
567
568
569/**
570 * Invalidates the IEM TLBs.
571 *
572 * This is called internally as well as by PGM when moving GC mappings.
573 *
574 * @param pVCpu The cross context virtual CPU structure of the calling
575 * thread.
576 */
577VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
578{
579#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
580 Log10(("IEMTlbInvalidateAll\n"));
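 /* Invalidation is done by bumping the revision value that is embedded in every
    entry tag rather than by clearing the arrays; existing tags then no longer
    compare equal. Only when the revision counter wraps around to zero do the
    tags have to be reset explicitly, as done in the unlikely branches below. */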
581# ifdef IEM_WITH_CODE_TLB
582 pVCpu->iem.s.cbInstrBufTotal = 0;
583 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
584 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
585 { /* very likely */ }
586 else
587 {
588 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
589 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
590 while (i-- > 0)
591 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
592 }
593# endif
594
595# ifdef IEM_WITH_DATA_TLB
596 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
597 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
598 { /* very likely */ }
599 else
600 {
601 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
602 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
603 while (i-- > 0)
604 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
605 }
606# endif
607#else
608 RT_NOREF(pVCpu);
609#endif
610}
611
612
613/**
614 * Invalidates a page in the TLBs.
615 *
616 * @param pVCpu The cross context virtual CPU structure of the calling
617 * thread.
618 * @param GCPtr The address of the page to invalidate
619 * @thread EMT(pVCpu)
620 */
621VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
622{
623#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
624 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
625 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
626 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
627 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
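 /* The TLBs are direct mapped: IEMTLB_TAG_TO_INDEX selects a single candidate
    entry in each TLB, so at most one code and one data entry can match the tag
    and need to be retired for this page. */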
628
629# ifdef IEM_WITH_CODE_TLB
630 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
631 {
632 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
633 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
634 pVCpu->iem.s.cbInstrBufTotal = 0;
635 }
636# endif
637
638# ifdef IEM_WITH_DATA_TLB
639 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
640 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
641# endif
642#else
643 NOREF(pVCpu); NOREF(GCPtr);
644#endif
645}
646
647
648#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
649/**
650 * Invalidates both TLBs the slow way following a revision rollover.
651 *
652 * Worker for IEMTlbInvalidateAllPhysical,
653 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
654 * iemMemMapJmp and others.
655 *
656 * @thread EMT(pVCpu)
657 */
658static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
659{
660 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
661 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
662 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
663
664 unsigned i;
665# ifdef IEM_WITH_CODE_TLB
666 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
667 while (i-- > 0)
668 {
669 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
670 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
671 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
672 }
673# endif
674# ifdef IEM_WITH_DATA_TLB
675 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
676 while (i-- > 0)
677 {
678 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
679 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
680 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
681 }
682# endif
683
684}
685#endif
686
687
688/**
689 * Invalidates the host physical aspects of the IEM TLBs.
690 *
691 * This is called internally as well as by PGM when moving GC mappings.
692 *
693 * @param pVCpu The cross context virtual CPU structure of the calling
694 * thread.
695 * @note Currently not used.
696 */
697VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
698{
699#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
700 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
701 Log10(("IEMTlbInvalidateAllPhysical\n"));
702
703# ifdef IEM_WITH_CODE_TLB
704 pVCpu->iem.s.cbInstrBufTotal = 0;
705# endif
706 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
707 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
708 {
709 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
710 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
711 }
712 else
713 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
714#else
715 NOREF(pVCpu);
716#endif
717}
718
719
720/**
721 * Invalidates the host physical aspects of the IEM TLBs.
722 *
723 * This is called internally as well as by PGM when moving GC mappings.
724 *
725 * @param pVM The cross context VM structure.
726 * @param idCpuCaller The ID of the calling EMT if available to the caller,
727 * otherwise NIL_VMCPUID.
728 *
729 * @remarks Caller holds the PGM lock.
730 */
731VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
732{
733#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
734 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
735 if (pVCpuCaller)
736 VMCPU_ASSERT_EMT(pVCpuCaller);
737 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
738
739 VMCC_FOR_EACH_VMCPU(pVM)
740 {
741# ifdef IEM_WITH_CODE_TLB
742 if (pVCpuCaller == pVCpu)
743 pVCpu->iem.s.cbInstrBufTotal = 0;
744# endif
745
746 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
747 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
748 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
749 { /* likely */}
750 else if (pVCpuCaller == pVCpu)
751 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
752 else
753 {
754 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
755 continue;
756 }
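 /* Publish the new revision with a compare-exchange so a concurrent bump done by
    the remote EMT itself is not clobbered; if the value changed in the meantime,
    that EMT's own update wins and nothing is lost. */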
757 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
758 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
759 }
760 VMCC_FOR_EACH_VMCPU_END(pVM);
761
762#else
763 RT_NOREF(pVM, idCpuCaller);
764#endif
765}
766
767
768/**
769 * Flushes the prefetch buffer, light version.
770 */
771void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
772{
773#ifndef IEM_WITH_CODE_TLB
774 pVCpu->iem.s.cbOpcode = cbInstr;
775#else
776 RT_NOREF(pVCpu, cbInstr);
777#endif
778}
779
780
781/**
782 * Flushes the prefetch buffer, heavy version.
783 */
784void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
785{
786#ifndef IEM_WITH_CODE_TLB
787 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
788#elif 1
789 pVCpu->iem.s.pbInstrBuf = NULL;
790 RT_NOREF(cbInstr);
791#else
792 RT_NOREF(pVCpu, cbInstr);
793#endif
794}
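/* Note: with the code TLB enabled the "light" flush above is a no-op and only the
   "heavy" variant matters - dropping pbInstrBuf forces the next opcode fetch to go
   through iemOpcodeFetchBytesJmp and re-establish the instruction buffer. In the
   non-TLB build both simply reset cbOpcode to the given instruction length. */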
795
796
797
798#ifdef IEM_WITH_CODE_TLB
799
800/**
801 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
802 * failure and longjmp'ing.
803 *
804 * We end up here for a number of reasons:
805 * - pbInstrBuf isn't yet initialized.
806 * - Advancing beyond the buffer boundary (e.g. crossing a page).
807 * - Advancing beyond the CS segment limit.
808 * - Fetching from non-mappable page (e.g. MMIO).
809 *
810 * @param pVCpu The cross context virtual CPU structure of the
811 * calling thread.
812 * @param pvDst Where to return the bytes.
813 * @param cbDst Number of bytes to read.
814 *
815 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
816 */
817void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
818{
819# ifdef IN_RING3
820 for (;;)
821 {
822 Assert(cbDst <= 8);
823 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
824
825 /*
826 * We might have a partial buffer match, deal with that first to make the
827 * rest simpler. This is the first part of the cross page/buffer case.
828 */
829 if (pVCpu->iem.s.pbInstrBuf != NULL)
830 {
831 if (offBuf < pVCpu->iem.s.cbInstrBuf)
832 {
833 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
834 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
835 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
836
837 cbDst -= cbCopy;
838 pvDst = (uint8_t *)pvDst + cbCopy;
839 offBuf += cbCopy;
840 pVCpu->iem.s.offInstrNextByte += offBuf;
841 }
842 }
843
844 /*
845 * Check segment limit, figuring how much we're allowed to access at this point.
846 *
847 * We will fault immediately if RIP is past the segment limit / in non-canonical
848 * territory. If we do continue, there are one or more bytes to read before we
849 * end up in trouble and we need to do that first before faulting.
850 */
851 RTGCPTR GCPtrFirst;
852 uint32_t cbMaxRead;
853 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
854 {
855 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
856 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
857 { /* likely */ }
858 else
859 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
860 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
861 }
862 else
863 {
864 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
865 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); - this is allowed */
866 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
867 { /* likely */ }
868 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
869 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
870 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
871 if (cbMaxRead != 0)
872 { /* likely */ }
873 else
874 {
875 /* Overflowed because address is 0 and limit is max. */
876 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
877 cbMaxRead = X86_PAGE_SIZE;
878 }
879 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
880 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
881 if (cbMaxRead2 < cbMaxRead)
882 cbMaxRead = cbMaxRead2;
883 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
884 }
885
886 /*
887 * Get the TLB entry for this piece of code.
888 */
889 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
890 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
891 if (pTlbe->uTag == uTag)
892 {
893 /* likely when executing lots of code, otherwise unlikely */
894# ifdef VBOX_WITH_STATISTICS
895 pVCpu->iem.s.CodeTlb.cTlbHits++;
896# endif
897 }
898 else
899 {
900 pVCpu->iem.s.CodeTlb.cTlbMisses++;
901 PGMPTWALK Walk;
902 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
903 if (RT_FAILURE(rc))
904 {
905#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
906 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
907 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
908#endif
909 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
910 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
911 }
912
913 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
914 Assert(Walk.fSucceeded);
915 pTlbe->uTag = uTag;
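 /* The US/RW/D/A page-table bits are stored inverted and NX is shifted down to
    bit 0, so a set bit in fFlagsAndPhysRev always signals something that needs
    special handling (no user access, not writable, not accessed/dirty, or
    no-execute); the checks below can then use plain AND tests. */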
916 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
917 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
918 pTlbe->GCPhys = Walk.GCPhys;
919 pTlbe->pbMappingR3 = NULL;
920 }
921
922 /*
923 * Check TLB page table level access flags.
924 */
925 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
926 {
927 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
928 {
929 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
930 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
931 }
932 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
933 {
934 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
935 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
936 }
937 }
938
939 /*
940 * Look up the physical page info if necessary.
941 */
942 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
943 { /* not necessary */ }
944 else
945 {
946 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
947 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
948 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
949 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
950 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
951 { /* likely */ }
952 else
953 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
954 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
955 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
956 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
957 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
958 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
959 }
960
961# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
962 /*
963 * Try do a direct read using the pbMappingR3 pointer.
964 */
965 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
966 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
967 {
968 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
969 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
970 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
971 {
972 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
973 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
974 }
975 else
976 {
977 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
978 if (cbInstr + (uint32_t)cbDst <= 15)
979 {
980 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
981 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
982 }
983 else
984 {
985 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
986 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
987 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
988 }
989 }
990 if (cbDst <= cbMaxRead)
991 {
992 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
993 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
994 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
995 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
996 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
997 return;
998 }
999 pVCpu->iem.s.pbInstrBuf = NULL;
1000
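 /* The request spills over into the next page: copy what this page still provides
    and let the outer loop run again - pbInstrBuf was cleared above, so the next
    iteration performs a fresh TLB lookup for the following page. */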
1001 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1002 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1003 }
1004# else
1005# error "refactor as needed"
1006 /*
1007 * If there is no special read handling, we can read a bit more and
1008 * put it in the prefetch buffer.
1009 */
1010 if ( cbDst < cbMaxRead
1011 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1012 {
1013 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1014 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1015 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1016 { /* likely */ }
1017 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1018 {
1019 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1020 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1021 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1022 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1023 }
1024 else
1025 {
1026 Log((RT_SUCCESS(rcStrict)
1027 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1028 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1029 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1030 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1031 }
1032 }
1033# endif
1034 /*
1035 * Special read handling, so only read exactly what's needed.
1036 * This is a highly unlikely scenario.
1037 */
1038 else
1039 {
1040 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1041
1042 /* Check instruction length. */
1043 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1044 if (RT_LIKELY(cbInstr + cbDst <= 15))
1045 { /* likely */ }
1046 else
1047 {
1048 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1049 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1050 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1051 }
1052
1053 /* Do the reading. */
1054 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1055 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1056 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1057 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1058 { /* likely */ }
1059 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1060 {
1061 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1062 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1063 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1064 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1065 }
1066 else
1067 {
1068 Log((RT_SUCCESS(rcStrict)
1069 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1070 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1071 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1072 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1073 }
1074 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1075 if (cbToRead == cbDst)
1076 return;
1077 }
1078
1079 /*
1080 * More to read, loop.
1081 */
1082 cbDst -= cbMaxRead;
1083 pvDst = (uint8_t *)pvDst + cbMaxRead;
1084 }
1085# else /* !IN_RING3 */
1086 RT_NOREF(pvDst, cbDst);
1087 if (pvDst || cbDst)
1088 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1089# endif /* !IN_RING3 */
1090}
1091
1092#else
1093
1094/**
1095 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1096 * exception if it fails.
1097 *
1098 * @returns Strict VBox status code.
1099 * @param pVCpu The cross context virtual CPU structure of the
1100 * calling thread.
1101 * @param cbMin The minimum number of bytes relative to offOpcode
1102 * that must be read.
1103 */
1104VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1105{
1106 /*
1107 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1108 *
1109 * First translate CS:rIP to a physical address.
1110 */
1111 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1112 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1113 uint8_t const cbLeft = cbOpcode - offOpcode;
1114 Assert(cbLeft < cbMin);
1115 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1116
1117 uint32_t cbToTryRead;
1118 RTGCPTR GCPtrNext;
1119 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1120 {
1121 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1122 if (!IEM_IS_CANONICAL(GCPtrNext))
1123 return iemRaiseGeneralProtectionFault0(pVCpu);
1124 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1125 }
1126 else
1127 {
1128 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1129 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); - this is allowed */
1130 GCPtrNext32 += cbOpcode;
1131 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1132 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1133 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1134 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1135 if (!cbToTryRead) /* overflowed */
1136 {
1137 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1138 cbToTryRead = UINT32_MAX;
1139 /** @todo check out wrapping around the code segment. */
1140 }
1141 if (cbToTryRead < cbMin - cbLeft)
1142 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1143 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1144
1145 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1146 if (cbToTryRead > cbLeftOnPage)
1147 cbToTryRead = cbLeftOnPage;
1148 }
1149
1150 /* Restrict to opcode buffer space.
1151
1152 We're making ASSUMPTIONS here based on work done previously in
1153 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1154 be fetched in case of an instruction crossing two pages. */
1155 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1156 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1157 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1158 { /* likely */ }
1159 else
1160 {
1161 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1162 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1163 return iemRaiseGeneralProtectionFault0(pVCpu);
1164 }
1165
1166 PGMPTWALK Walk;
1167 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1168 if (RT_FAILURE(rc))
1169 {
1170 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1171#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1172 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1173 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1174#endif
1175 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1176 }
1177 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1178 {
1179 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1180#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1181 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1182 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1183#endif
1184 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1185 }
1186 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1187 {
1188 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1189#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1190 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1191 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1192#endif
1193 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1194 }
1195 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1196 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1197 /** @todo Check reserved bits and such stuff. PGM is better at doing
1198 * that, so do it when implementing the guest virtual address
1199 * TLB... */
1200
1201 /*
1202 * Read the bytes at this address.
1203 *
1204 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1205 * and since PATM should only patch the start of an instruction there
1206 * should be no need to check again here.
1207 */
1208 if (!pVCpu->iem.s.fBypassHandlers)
1209 {
1210 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1211 cbToTryRead, PGMACCESSORIGIN_IEM);
1212 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1213 { /* likely */ }
1214 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1215 {
1216 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1217 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1218 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1219 }
1220 else
1221 {
1222 Log((RT_SUCCESS(rcStrict)
1223 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1224 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1225 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1226 return rcStrict;
1227 }
1228 }
1229 else
1230 {
1231 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1232 if (RT_SUCCESS(rc))
1233 { /* likely */ }
1234 else
1235 {
1236 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1237 return rc;
1238 }
1239 }
1240 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1241 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1242
1243 return VINF_SUCCESS;
1244}
1245
1246#endif /* !IEM_WITH_CODE_TLB */
1247#ifndef IEM_WITH_SETJMP
1248
1249/**
1250 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1251 *
1252 * @returns Strict VBox status code.
1253 * @param pVCpu The cross context virtual CPU structure of the
1254 * calling thread.
1255 * @param pb Where to return the opcode byte.
1256 */
1257VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1258{
1259 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1260 if (rcStrict == VINF_SUCCESS)
1261 {
1262 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1263 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1264 pVCpu->iem.s.offOpcode = offOpcode + 1;
1265 }
1266 else
1267 *pb = 0;
1268 return rcStrict;
1269}
1270
1271#else /* IEM_WITH_SETJMP */
1272
1273/**
1274 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1275 *
1276 * @returns The opcode byte.
1277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1278 */
1279uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1280{
1281# ifdef IEM_WITH_CODE_TLB
1282 uint8_t u8;
1283 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1284 return u8;
1285# else
1286 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1287 if (rcStrict == VINF_SUCCESS)
1288 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1289 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1290# endif
1291}
1292
1293#endif /* IEM_WITH_SETJMP */
1294
1295#ifndef IEM_WITH_SETJMP
1296
1297/**
1298 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1299 *
1300 * @returns Strict VBox status code.
1301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1302 * @param pu16 Where to return the opcode word.
1303 */
1304VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1305{
1306 uint8_t u8;
1307 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1308 if (rcStrict == VINF_SUCCESS)
1309 *pu16 = (int8_t)u8;
1310 return rcStrict;
1311}
1312
1313
1314/**
1315 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1316 *
1317 * @returns Strict VBox status code.
1318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1319 * @param pu32 Where to return the opcode dword.
1320 */
1321VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1322{
1323 uint8_t u8;
1324 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1325 if (rcStrict == VINF_SUCCESS)
1326 *pu32 = (int8_t)u8;
1327 return rcStrict;
1328}
1329
1330
1331/**
1332 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1333 *
1334 * @returns Strict VBox status code.
1335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1336 * @param pu64 Where to return the opcode qword.
1337 */
1338VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1339{
1340 uint8_t u8;
1341 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1342 if (rcStrict == VINF_SUCCESS)
1343 *pu64 = (int8_t)u8;
1344 return rcStrict;
1345}
1346
1347#endif /* !IEM_WITH_SETJMP */
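/* Note on the three S8Sx helpers above: casting the fetched byte to int8_t before
   the implicit conversion to the wider unsigned type is what performs the sign
   extension, e.g. bytes 0x80..0xff become 0xff80..0xffff in the 16-bit variant. */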
1348
1349
1350#ifndef IEM_WITH_SETJMP
1351
1352/**
1353 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1354 *
1355 * @returns Strict VBox status code.
1356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1357 * @param pu16 Where to return the opcode word.
1358 */
1359VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1360{
1361 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1362 if (rcStrict == VINF_SUCCESS)
1363 {
1364 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1365# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1366 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1367# else
1368 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1369# endif
1370 pVCpu->iem.s.offOpcode = offOpcode + 2;
1371 }
1372 else
1373 *pu16 = 0;
1374 return rcStrict;
1375}
1376
1377#else /* IEM_WITH_SETJMP */
1378
1379/**
1380 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1381 *
1382 * @returns The opcode word.
1383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1384 */
1385uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1386{
1387# ifdef IEM_WITH_CODE_TLB
1388 uint16_t u16;
1389 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1390 return u16;
1391# else
1392 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1393 if (rcStrict == VINF_SUCCESS)
1394 {
1395 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1396 pVCpu->iem.s.offOpcode += 2;
1397# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1398 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1399# else
1400 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1401# endif
1402 }
1403 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1404# endif
1405}
1406
1407#endif /* IEM_WITH_SETJMP */
1408
1409#ifndef IEM_WITH_SETJMP
1410
1411/**
1412 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1413 *
1414 * @returns Strict VBox status code.
1415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1416 * @param pu32 Where to return the opcode double word.
1417 */
1418VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1419{
1420 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1421 if (rcStrict == VINF_SUCCESS)
1422 {
1423 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1424 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1425 pVCpu->iem.s.offOpcode = offOpcode + 2;
1426 }
1427 else
1428 *pu32 = 0;
1429 return rcStrict;
1430}
1431
1432
1433/**
1434 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1435 *
1436 * @returns Strict VBox status code.
1437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1438 * @param pu64 Where to return the opcode quad word.
1439 */
1440VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1441{
1442 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1443 if (rcStrict == VINF_SUCCESS)
1444 {
1445 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1446 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1447 pVCpu->iem.s.offOpcode = offOpcode + 2;
1448 }
1449 else
1450 *pu64 = 0;
1451 return rcStrict;
1452}
1453
1454#endif /* !IEM_WITH_SETJMP */
1455
1456#ifndef IEM_WITH_SETJMP
1457
1458/**
1459 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1460 *
1461 * @returns Strict VBox status code.
1462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1463 * @param pu32 Where to return the opcode dword.
1464 */
1465VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1466{
1467 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1468 if (rcStrict == VINF_SUCCESS)
1469 {
1470 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1471# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1472 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1473# else
1474 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1475 pVCpu->iem.s.abOpcode[offOpcode + 1],
1476 pVCpu->iem.s.abOpcode[offOpcode + 2],
1477 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1478# endif
1479 pVCpu->iem.s.offOpcode = offOpcode + 4;
1480 }
1481 else
1482 *pu32 = 0;
1483 return rcStrict;
1484}
1485
1486#else /* IEM_WITH_SETJMP */
1487
1488/**
1489 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1490 *
1491 * @returns The opcode dword.
1492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1493 */
1494uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1495{
1496# ifdef IEM_WITH_CODE_TLB
1497 uint32_t u32;
1498 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1499 return u32;
1500# else
1501 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1502 if (rcStrict == VINF_SUCCESS)
1503 {
1504 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1505 pVCpu->iem.s.offOpcode = offOpcode + 4;
1506# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1507 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1508# else
1509 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1510 pVCpu->iem.s.abOpcode[offOpcode + 1],
1511 pVCpu->iem.s.abOpcode[offOpcode + 2],
1512 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1513# endif
1514 }
1515 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1516# endif
1517}
1518
1519#endif /* IEM_WITH_SETJMP */
1520
1521#ifndef IEM_WITH_SETJMP
1522
1523/**
1524 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1525 *
1526 * @returns Strict VBox status code.
1527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1528 * @param pu64 Where to return the opcode qword.
1529 */
1530VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1531{
1532 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1533 if (rcStrict == VINF_SUCCESS)
1534 {
1535 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1536 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1537 pVCpu->iem.s.abOpcode[offOpcode + 1],
1538 pVCpu->iem.s.abOpcode[offOpcode + 2],
1539 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1540 pVCpu->iem.s.offOpcode = offOpcode + 4;
1541 }
1542 else
1543 *pu64 = 0;
1544 return rcStrict;
1545}
1546
1547
1548/**
1549 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1550 *
1551 * @returns Strict VBox status code.
1552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1553 * @param pu64 Where to return the opcode qword.
1554 */
1555VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1556{
1557 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1558 if (rcStrict == VINF_SUCCESS)
1559 {
1560 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1561 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1562 pVCpu->iem.s.abOpcode[offOpcode + 1],
1563 pVCpu->iem.s.abOpcode[offOpcode + 2],
1564 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1565 pVCpu->iem.s.offOpcode = offOpcode + 4;
1566 }
1567 else
1568 *pu64 = 0;
1569 return rcStrict;
1570}
1571
1572#endif /* !IEM_WITH_SETJMP */
1573
1574#ifndef IEM_WITH_SETJMP
1575
1576/**
1577 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1578 *
1579 * @returns Strict VBox status code.
1580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1581 * @param pu64 Where to return the opcode qword.
1582 */
1583VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1584{
1585 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1586 if (rcStrict == VINF_SUCCESS)
1587 {
1588 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1589# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1590 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1591# else
1592 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1593 pVCpu->iem.s.abOpcode[offOpcode + 1],
1594 pVCpu->iem.s.abOpcode[offOpcode + 2],
1595 pVCpu->iem.s.abOpcode[offOpcode + 3],
1596 pVCpu->iem.s.abOpcode[offOpcode + 4],
1597 pVCpu->iem.s.abOpcode[offOpcode + 5],
1598 pVCpu->iem.s.abOpcode[offOpcode + 6],
1599 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1600# endif
1601 pVCpu->iem.s.offOpcode = offOpcode + 8;
1602 }
1603 else
1604 *pu64 = 0;
1605 return rcStrict;
1606}
1607
1608#else /* IEM_WITH_SETJMP */
1609
1610/**
1611 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1612 *
1613 * @returns The opcode qword.
1614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1615 */
1616uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1617{
1618# ifdef IEM_WITH_CODE_TLB
1619 uint64_t u64;
1620 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1621 return u64;
1622# else
1623 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1624 if (rcStrict == VINF_SUCCESS)
1625 {
1626 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1627 pVCpu->iem.s.offOpcode = offOpcode + 8;
1628# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1629 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1630# else
1631 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1632 pVCpu->iem.s.abOpcode[offOpcode + 1],
1633 pVCpu->iem.s.abOpcode[offOpcode + 2],
1634 pVCpu->iem.s.abOpcode[offOpcode + 3],
1635 pVCpu->iem.s.abOpcode[offOpcode + 4],
1636 pVCpu->iem.s.abOpcode[offOpcode + 5],
1637 pVCpu->iem.s.abOpcode[offOpcode + 6],
1638 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1639# endif
1640 }
1641 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1642# endif
1643}
1644
1645#endif /* IEM_WITH_SETJMP */
1646
1647
1648
1649/** @name Misc Worker Functions.
1650 * @{
1651 */
1652
1653/**
1654 * Gets the exception class for the specified exception vector.
1655 *
1656 * @returns The class of the specified exception.
1657 * @param uVector The exception vector.
1658 */
1659static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1660{
1661 Assert(uVector <= X86_XCPT_LAST);
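 /* Classification follows the Intel SDM double-fault rules: benign, contributory or page-fault class. */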
1662 switch (uVector)
1663 {
1664 case X86_XCPT_DE:
1665 case X86_XCPT_TS:
1666 case X86_XCPT_NP:
1667 case X86_XCPT_SS:
1668 case X86_XCPT_GP:
1669 case X86_XCPT_SX: /* AMD only */
1670 return IEMXCPTCLASS_CONTRIBUTORY;
1671
1672 case X86_XCPT_PF:
1673 case X86_XCPT_VE: /* Intel only */
1674 return IEMXCPTCLASS_PAGE_FAULT;
1675
1676 case X86_XCPT_DF:
1677 return IEMXCPTCLASS_DOUBLE_FAULT;
1678 }
1679 return IEMXCPTCLASS_BENIGN;
1680}
1681
1682
1683/**
1684 * Evaluates how to handle an exception caused during delivery of another event
1685 * (exception / interrupt).
1686 *
1687 * @returns How to handle the recursive exception.
1688 * @param pVCpu The cross context virtual CPU structure of the
1689 * calling thread.
1690 * @param fPrevFlags The flags of the previous event.
1691 * @param uPrevVector The vector of the previous event.
1692 * @param fCurFlags The flags of the current exception.
1693 * @param uCurVector The vector of the current exception.
1694 * @param pfXcptRaiseInfo Where to store additional information about the
1695 * exception condition. Optional.
1696 */
1697VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1698 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1699{
1700 /*
1701 * Only CPU exceptions can be raised while delivering other events; software interrupt
1702 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1703 */
1704 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1705 Assert(pVCpu); RT_NOREF(pVCpu);
1706 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1707
1708 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1709 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1710 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1711 {
1712 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1713 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1714 {
1715 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1716 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1717 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1718 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1719 {
1720 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1721 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1722 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1723 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1724 uCurVector, pVCpu->cpum.GstCtx.cr2));
1725 }
1726 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1727 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1728 {
1729 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1730 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1731 }
1732 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1733 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1734 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1735 {
1736 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1737 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1738 }
1739 }
1740 else
1741 {
1742 if (uPrevVector == X86_XCPT_NMI)
1743 {
1744 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1745 if (uCurVector == X86_XCPT_PF)
1746 {
1747 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1748 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1749 }
1750 }
1751 else if ( uPrevVector == X86_XCPT_AC
1752 && uCurVector == X86_XCPT_AC)
1753 {
1754 enmRaise = IEMXCPTRAISE_CPU_HANG;
1755 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1756 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1757 }
1758 }
1759 }
1760 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1761 {
1762 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1763 if (uCurVector == X86_XCPT_PF)
1764 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1765 }
1766 else
1767 {
1768 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1769 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1770 }
1771
1772 if (pfXcptRaiseInfo)
1773 *pfXcptRaiseInfo = fRaiseInfo;
1774 return enmRaise;
1775}
1776
1777
1778/**
1779 * Enters the CPU shutdown state initiated by a triple fault or other
1780 * unrecoverable conditions.
1781 *
1782 * @returns Strict VBox status code.
1783 * @param pVCpu The cross context virtual CPU structure of the
1784 * calling thread.
1785 */
1786static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1787{
1788 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1789 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1790
1791 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1792 {
1793 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1794 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1795 }
1796
1797 RT_NOREF(pVCpu);
1798 return VINF_EM_TRIPLE_FAULT;
1799}
1800
1801
1802/**
1803 * Validates a new SS segment.
1804 *
1805 * @returns VBox strict status code.
1806 * @param pVCpu The cross context virtual CPU structure of the
1807 * calling thread.
1808 * @param NewSS The new SS selector.
1809 * @param uCpl The CPL to load the stack for.
1810 * @param pDesc Where to return the descriptor.
1811 */
1812static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1813{
1814 /* Null selectors are not allowed (we're not called for dispatching
1815 interrupts with SS=0 in long mode). */
1816 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1817 {
1818 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1819 return iemRaiseTaskSwitchFault0(pVCpu);
1820 }
1821
1822 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1823 if ((NewSS & X86_SEL_RPL) != uCpl)
1824 {
1825 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1826 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1827 }
1828
1829 /*
1830 * Read the descriptor.
1831 */
1832 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1833 if (rcStrict != VINF_SUCCESS)
1834 return rcStrict;
1835
1836 /*
1837 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1838 */
1839 if (!pDesc->Legacy.Gen.u1DescType)
1840 {
1841 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1842 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1843 }
1844
1845 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1846 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1847 {
1848 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1849 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1850 }
1851 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1852 {
1853 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1854 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1855 }
1856
1857 /* Is it there? */
1858 /** @todo testcase: Is this checked before the canonical / limit check below? */
1859 if (!pDesc->Legacy.Gen.u1Present)
1860 {
1861 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1862 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1863 }
1864
1865 return VINF_SUCCESS;
1866}
1867
1868/** @} */
1869
1870
1871/** @name Raising Exceptions.
1872 *
1873 * @{
1874 */
1875
1876
1877/**
1878 * Loads the specified stack far pointer from the TSS.
1879 *
1880 * @returns VBox strict status code.
1881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1882 * @param uCpl The CPL to load the stack for.
1883 * @param pSelSS Where to return the new stack segment.
1884 * @param puEsp Where to return the new stack pointer.
1885 */
1886static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1887{
1888 VBOXSTRICTRC rcStrict;
1889 Assert(uCpl < 4);
1890
1891 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1892 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1893 {
1894 /*
1895 * 16-bit TSS (X86TSS16).
1896 */
1897 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1898 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1899 {
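 /* The 16-bit TSS holds the ring stacks as SS:SP pairs (2+2 bytes) starting at offset 2, hence uCpl * 4 + 2. */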
1900 uint32_t off = uCpl * 4 + 2;
1901 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1902 {
1903 /** @todo check actual access pattern here. */
1904 uint32_t u32Tmp = 0; /* zero init to silence gcc maybe-uninitialized warnings */
1905 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1906 if (rcStrict == VINF_SUCCESS)
1907 {
1908 *puEsp = RT_LOWORD(u32Tmp);
1909 *pSelSS = RT_HIWORD(u32Tmp);
1910 return VINF_SUCCESS;
1911 }
1912 }
1913 else
1914 {
1915 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1916 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1917 }
1918 break;
1919 }
1920
1921 /*
1922 * 32-bit TSS (X86TSS32).
1923 */
1924 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1925 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1926 {
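 /* The 32-bit TSS holds the ring stacks as ESP (4 bytes) + SS (4 bytes, upper half reserved) pairs starting at offset 4, hence uCpl * 8 + 4. */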
1927 uint32_t off = uCpl * 8 + 4;
1928 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1929 {
1930/** @todo check actual access pattern here. */
1931 uint64_t u64Tmp;
1932 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1933 if (rcStrict == VINF_SUCCESS)
1934 {
1935 *puEsp = u64Tmp & UINT32_MAX;
1936 *pSelSS = (RTSEL)(u64Tmp >> 32);
1937 return VINF_SUCCESS;
1938 }
1939 }
1940 else
1941 {
1942 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1943 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1944 }
1945 break;
1946 }
1947
1948 default:
1949 AssertFailed();
1950 rcStrict = VERR_IEM_IPE_4;
1951 break;
1952 }
1953
1954 *puEsp = 0; /* make gcc happy */
1955 *pSelSS = 0; /* make gcc happy */
1956 return rcStrict;
1957}
1958
1959
1960/**
1961 * Loads the specified stack pointer from the 64-bit TSS.
1962 *
1963 * @returns VBox strict status code.
1964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1965 * @param uCpl The CPL to load the stack for.
1966 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1967 * @param puRsp Where to return the new stack pointer.
1968 */
1969static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1970{
1971 Assert(uCpl < 4);
1972 Assert(uIst < 8);
1973 *puRsp = 0; /* make gcc happy */
1974
1975 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1976 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1977
1978 uint32_t off;
1979 if (uIst)
1980 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1981 else
1982 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1983 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1984 {
1985 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1986 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1987 }
1988
1989 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1990}
1991
1992
1993/**
1994 * Adjust the CPU state according to the exception being raised.
1995 *
1996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1997 * @param u8Vector The exception that has been raised.
1998 */
1999DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2000{
2001 switch (u8Vector)
2002 {
2003 case X86_XCPT_DB:
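 /* Delivering #DB clears DR7.GD so the handler can access the debug registers without faulting again. */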
2004 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2005 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2006 break;
2007 /** @todo Read the AMD and Intel exception reference... */
2008 }
2009}
2010
2011
2012/**
2013 * Implements exceptions and interrupts for real mode.
2014 *
2015 * @returns VBox strict status code.
2016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2017 * @param cbInstr The number of bytes to offset rIP by in the return
2018 * address.
2019 * @param u8Vector The interrupt / exception vector number.
2020 * @param fFlags The flags.
2021 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2022 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2023 */
2024static VBOXSTRICTRC
2025iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2026 uint8_t cbInstr,
2027 uint8_t u8Vector,
2028 uint32_t fFlags,
2029 uint16_t uErr,
2030 uint64_t uCr2) RT_NOEXCEPT
2031{
2032 NOREF(uErr); NOREF(uCr2);
2033 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2034
2035 /*
2036 * Read the IDT entry.
2037 */
2038 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2039 {
2040 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2041 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2042 }
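 /* A real-mode IDT entry is a 4 byte IP:CS far pointer, so fetch it as a single dword. */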
2043 RTFAR16 Idte;
2044 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2045 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2046 {
2047 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2048 return rcStrict;
2049 }
2050
2051 /*
2052 * Push the stack frame.
2053 */
2054 uint16_t *pu16Frame;
2055 uint64_t uNewRsp;
2056 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2057 if (rcStrict != VINF_SUCCESS)
2058 return rcStrict;
2059
2060 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2061#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2062 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2063 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2064 fEfl |= UINT16_C(0xf000);
2065#endif
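 /* Real-mode interrupt frame: FLAGS, CS and IP are pushed in that order, so IP ends up at the lowest address. */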
2066 pu16Frame[2] = (uint16_t)fEfl;
2067 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2068 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2069 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2070 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2071 return rcStrict;
2072
2073 /*
2074 * Load the vector address into cs:ip and make exception specific state
2075 * adjustments.
2076 */
2077 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2078 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2079 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2080 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2081 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2082 pVCpu->cpum.GstCtx.rip = Idte.off;
2083 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2084 IEMMISC_SET_EFL(pVCpu, fEfl);
2085
2086 /** @todo do we actually do this in real mode? */
2087 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2088 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2089
2090 /* pVCpu->iem.s.enmCpuMode and pVCpu->iem.s.uCpl don't really change here,
2091 so best leave them alone in case we're in a weird kind of real mode... */
2092
2093 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2094}
2095
2096
2097/**
2098 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2099 *
2100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2101 * @param pSReg Pointer to the segment register.
2102 */
2103DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2104{
2105 pSReg->Sel = 0;
2106 pSReg->ValidSel = 0;
2107 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2108 {
2109 /* VT-x (observed on an Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
2110 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2111 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2112 }
2113 else
2114 {
2115 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2116 /** @todo check this on AMD-V */
2117 pSReg->u64Base = 0;
2118 pSReg->u32Limit = 0;
2119 }
2120}
2121
2122
2123/**
2124 * Loads a segment selector during a task switch in V8086 mode.
2125 *
2126 * @param pSReg Pointer to the segment register.
2127 * @param uSel The selector value to load.
2128 */
2129DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2130{
2131 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2132 pSReg->Sel = uSel;
2133 pSReg->ValidSel = uSel;
2134 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2135 pSReg->u64Base = uSel << 4;
2136 pSReg->u32Limit = 0xffff;
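 /* 0xf3 = present, DPL=3, read/write accessed data segment - the fixed V8086 segment attributes. */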
2137 pSReg->Attr.u = 0xf3;
2138}
2139
2140
2141/**
2142 * Loads a segment selector during a task switch in protected mode.
2143 *
2144 * In this task switch scenario, we would throw \#TS exceptions rather than
2145 * \#GPs.
2146 *
2147 * @returns VBox strict status code.
2148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2149 * @param pSReg Pointer to the segment register.
2150 * @param uSel The new selector value.
2151 *
2152 * @remarks This does _not_ handle CS or SS.
2153 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
2154 */
2155static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2156{
2157 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2158
2159 /* Null data selector. */
2160 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2161 {
2162 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2163 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2164 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2165 return VINF_SUCCESS;
2166 }
2167
2168 /* Fetch the descriptor. */
2169 IEMSELDESC Desc;
2170 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2171 if (rcStrict != VINF_SUCCESS)
2172 {
2173 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2174 VBOXSTRICTRC_VAL(rcStrict)));
2175 return rcStrict;
2176 }
2177
2178 /* Must be a data segment or readable code segment. */
2179 if ( !Desc.Legacy.Gen.u1DescType
2180 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2181 {
2182 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2183 Desc.Legacy.Gen.u4Type));
2184 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2185 }
2186
2187 /* Check privileges for data segments and non-conforming code segments. */
2188 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2189 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2190 {
2191 /* The RPL and the new CPL must be less than or equal to the DPL. */
2192 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2193 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2194 {
2195 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2196 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2197 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2198 }
2199 }
2200
2201 /* Is it there? */
2202 if (!Desc.Legacy.Gen.u1Present)
2203 {
2204 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2205 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2206 }
2207
2208 /* The base and limit. */
2209 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2210 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2211
2212 /*
2213 * Ok, everything checked out fine. Now set the accessed bit before
2214 * committing the result into the registers.
2215 */
2216 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2217 {
2218 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2219 if (rcStrict != VINF_SUCCESS)
2220 return rcStrict;
2221 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2222 }
2223
2224 /* Commit */
2225 pSReg->Sel = uSel;
2226 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2227 pSReg->u32Limit = cbLimit;
2228 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2229 pSReg->ValidSel = uSel;
2230 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2231 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2232 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2233
2234 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2235 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2236 return VINF_SUCCESS;
2237}
2238
2239
2240/**
2241 * Performs a task switch.
2242 *
2243 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2244 * caller is responsible for performing the necessary checks (like DPL, TSS
2245 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2246 * reference for JMP, CALL, IRET.
2247 *
2248 * If the task switch is due to a software interrupt or hardware exception,
2249 * the caller is responsible for validating the TSS selector and descriptor. See
2250 * Intel Instruction reference for INT n.
2251 *
2252 * @returns VBox strict status code.
2253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2254 * @param enmTaskSwitch The cause of the task switch.
2255 * @param uNextEip The EIP effective after the task switch.
2256 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2257 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2258 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2259 * @param SelTSS The TSS selector of the new task.
2260 * @param pNewDescTSS Pointer to the new TSS descriptor.
2261 */
2262VBOXSTRICTRC
2263iemTaskSwitch(PVMCPUCC pVCpu,
2264 IEMTASKSWITCH enmTaskSwitch,
2265 uint32_t uNextEip,
2266 uint32_t fFlags,
2267 uint16_t uErr,
2268 uint64_t uCr2,
2269 RTSEL SelTSS,
2270 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2271{
2272 Assert(!IEM_IS_REAL_MODE(pVCpu));
2273 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2274 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2275
2276 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2277 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2278 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2279 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2280 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2281
2282 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2283 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2284
2285 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2286 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2287
2288 /* Update CR2 in case it's a page-fault. */
2289 /** @todo This should probably be done much earlier in IEM/PGM. See
2290 * @bugref{5653#c49}. */
2291 if (fFlags & IEM_XCPT_FLAGS_CR2)
2292 pVCpu->cpum.GstCtx.cr2 = uCr2;
2293
2294 /*
2295 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2296 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2297 */
2298 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2299 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2300 if (uNewTSSLimit < uNewTSSLimitMin)
2301 {
2302 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2303 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2304 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2305 }
2306
2307 /*
2308 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2309 * The new TSS must have been read and validated (DPL, limits etc.) before a
2310 * task-switch VM-exit commences.
2311 *
2312 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2313 */
2314 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2315 {
2316 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2317 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2318 }
2319
2320 /*
2321 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2322 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2323 */
2324 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2325 {
2326 uint32_t const uExitInfo1 = SelTSS;
2327 uint32_t uExitInfo2 = uErr;
2328 switch (enmTaskSwitch)
2329 {
2330 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2331 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2332 default: break;
2333 }
2334 if (fFlags & IEM_XCPT_FLAGS_ERR)
2335 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2336 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2337 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2338
2339 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2340 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2341 RT_NOREF2(uExitInfo1, uExitInfo2);
2342 }
2343
2344 /*
2345 * Check the current TSS limit. The last bytes written to the current TSS during the
2346 * task switch are the 2 bytes at offset 0x5C (32-bit) and the byte at offset 0x28 (16-bit).
2347 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2348 *
2349 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2350 * end up with smaller than "legal" TSS limits.
2351 */
2352 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2353 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2354 if (uCurTSSLimit < uCurTSSLimitMin)
2355 {
2356 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2357 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2358 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2359 }
2360
2361 /*
2362 * Verify that the new TSS can be accessed and map it. Map only the required contents
2363 * and not the entire TSS.
2364 */
2365 void *pvNewTSS;
2366 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2367 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2368 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2369 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2370 * not perform correct translation if this happens. See Intel spec. 7.2.1
2371 * "Task-State Segment". */
2372 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2373 if (rcStrict != VINF_SUCCESS)
2374 {
2375 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2376 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2377 return rcStrict;
2378 }
2379
2380 /*
2381 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2382 */
2383 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2384 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2385 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2386 {
2387 PX86DESC pDescCurTSS;
2388 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2389 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2390 if (rcStrict != VINF_SUCCESS)
2391 {
2392 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2393 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2394 return rcStrict;
2395 }
2396
2397 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2398 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2399 if (rcStrict != VINF_SUCCESS)
2400 {
2401 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2402 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2403 return rcStrict;
2404 }
2405
2406 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2407 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2408 {
2409 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2410 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2411 fEFlags &= ~X86_EFL_NT;
2412 }
2413 }
2414
2415 /*
2416 * Save the CPU state into the current TSS.
2417 */
2418 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2419 if (GCPtrNewTSS == GCPtrCurTSS)
2420 {
2421 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2422 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2423 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2424 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2425 pVCpu->cpum.GstCtx.ldtr.Sel));
2426 }
2427 if (fIsNewTSS386)
2428 {
2429 /*
2430 * Verify that the current TSS (32-bit) can be accessed; map only the minimum required size.
2431 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2432 */
2433 void *pvCurTSS32;
2434 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2435 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2436 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2437 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2438 if (rcStrict != VINF_SUCCESS)
2439 {
2440 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2441 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2442 return rcStrict;
2443 }
2444
2445 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2446 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2447 pCurTSS32->eip = uNextEip;
2448 pCurTSS32->eflags = fEFlags;
2449 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2450 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2451 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2452 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2453 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2454 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2455 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2456 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2457 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2458 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2459 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2460 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2461 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2462 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2463
2464 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2465 if (rcStrict != VINF_SUCCESS)
2466 {
2467 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2468 VBOXSTRICTRC_VAL(rcStrict)));
2469 return rcStrict;
2470 }
2471 }
2472 else
2473 {
2474 /*
2475 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2476 */
2477 void *pvCurTSS16;
2478 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2479 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2480 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2481 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2482 if (rcStrict != VINF_SUCCESS)
2483 {
2484 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2485 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2486 return rcStrict;
2487 }
2488
2489 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2490 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2491 pCurTSS16->ip = uNextEip;
2492 pCurTSS16->flags = (uint16_t)fEFlags;
2493 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2494 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2495 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2496 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2497 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2498 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2499 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2500 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2501 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2502 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2503 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2504 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2505
2506 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2507 if (rcStrict != VINF_SUCCESS)
2508 {
2509 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2510 VBOXSTRICTRC_VAL(rcStrict)));
2511 return rcStrict;
2512 }
2513 }
2514
2515 /*
2516 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2517 */
2518 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2519 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2520 {
2521 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2522 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2523 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2524 }
2525
2526 /*
2527 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
2528 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2529 */
2530 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2531 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2532 bool fNewDebugTrap;
2533 if (fIsNewTSS386)
2534 {
2535 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2536 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2537 uNewEip = pNewTSS32->eip;
2538 uNewEflags = pNewTSS32->eflags;
2539 uNewEax = pNewTSS32->eax;
2540 uNewEcx = pNewTSS32->ecx;
2541 uNewEdx = pNewTSS32->edx;
2542 uNewEbx = pNewTSS32->ebx;
2543 uNewEsp = pNewTSS32->esp;
2544 uNewEbp = pNewTSS32->ebp;
2545 uNewEsi = pNewTSS32->esi;
2546 uNewEdi = pNewTSS32->edi;
2547 uNewES = pNewTSS32->es;
2548 uNewCS = pNewTSS32->cs;
2549 uNewSS = pNewTSS32->ss;
2550 uNewDS = pNewTSS32->ds;
2551 uNewFS = pNewTSS32->fs;
2552 uNewGS = pNewTSS32->gs;
2553 uNewLdt = pNewTSS32->selLdt;
2554 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2555 }
2556 else
2557 {
2558 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2559 uNewCr3 = 0;
2560 uNewEip = pNewTSS16->ip;
2561 uNewEflags = pNewTSS16->flags;
2562 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2563 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2564 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2565 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2566 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2567 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2568 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2569 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2570 uNewES = pNewTSS16->es;
2571 uNewCS = pNewTSS16->cs;
2572 uNewSS = pNewTSS16->ss;
2573 uNewDS = pNewTSS16->ds;
2574 uNewFS = 0;
2575 uNewGS = 0;
2576 uNewLdt = pNewTSS16->selLdt;
2577 fNewDebugTrap = false;
2578 }
2579
2580 if (GCPtrNewTSS == GCPtrCurTSS)
2581 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2582 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2583
2584 /*
2585 * We're done accessing the new TSS.
2586 */
2587 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2588 if (rcStrict != VINF_SUCCESS)
2589 {
2590 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2591 return rcStrict;
2592 }
2593
2594 /*
2595 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2596 */
2597 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2598 {
2599 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2600 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2601 if (rcStrict != VINF_SUCCESS)
2602 {
2603 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2604 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2605 return rcStrict;
2606 }
2607
2608 /* Check that the descriptor indicates the new TSS is available (not busy). */
2609 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2610 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2611 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2612
2613 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2614 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2615 if (rcStrict != VINF_SUCCESS)
2616 {
2617 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2618 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2619 return rcStrict;
2620 }
2621 }
2622
2623 /*
2624 * From this point on, we're technically in the new task. Exceptions raised during the rest of the
2625 * task switch are delivered in the context of the new task, before any of its instructions execute.
2626 */
2627 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2628 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2629 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2630 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2631 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2632 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2633 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2634
2635 /* Set the busy bit in TR. */
2636 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2637
2638 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2639 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2640 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2641 {
2642 uNewEflags |= X86_EFL_NT;
2643 }
2644
2645 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2646 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2647 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2648
2649 pVCpu->cpum.GstCtx.eip = uNewEip;
2650 pVCpu->cpum.GstCtx.eax = uNewEax;
2651 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2652 pVCpu->cpum.GstCtx.edx = uNewEdx;
2653 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2654 pVCpu->cpum.GstCtx.esp = uNewEsp;
2655 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2656 pVCpu->cpum.GstCtx.esi = uNewEsi;
2657 pVCpu->cpum.GstCtx.edi = uNewEdi;
2658
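 /* Keep only the modifiable EFLAGS bits and force the always-one reserved bit (bit 1). */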
2659 uNewEflags &= X86_EFL_LIVE_MASK;
2660 uNewEflags |= X86_EFL_RA1_MASK;
2661 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2662
2663 /*
2664 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2665 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2666 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2667 */
2668 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2669 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2670
2671 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2672 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2673
2674 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2675 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2676
2677 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2678 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2679
2680 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2681 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2682
2683 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2684 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2685 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2686
2687 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2688 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2689 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2690 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2691
2692 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2693 {
2694 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2695 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2696 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2697 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2698 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2699 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2700 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2701 }
2702
2703 /*
2704 * Switch CR3 for the new task.
2705 */
2706 if ( fIsNewTSS386
2707 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2708 {
2709 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2710 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2711 AssertRCSuccessReturn(rc, rc);
2712
2713 /* Inform PGM. */
2714 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2715 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2716 AssertRCReturn(rc, rc);
2717 /* ignore informational status codes */
2718
2719 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2720 }
2721
2722 /*
2723 * Switch LDTR for the new task.
2724 */
2725 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2726 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2727 else
2728 {
2729 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2730
2731 IEMSELDESC DescNewLdt;
2732 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2733 if (rcStrict != VINF_SUCCESS)
2734 {
2735 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2736 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2737 return rcStrict;
2738 }
2739 if ( !DescNewLdt.Legacy.Gen.u1Present
2740 || DescNewLdt.Legacy.Gen.u1DescType
2741 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2742 {
2743 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2744 uNewLdt, DescNewLdt.Legacy.u));
2745 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2746 }
2747
2748 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2749 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2750 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2751 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2752 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2753 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2754 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2755 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2756 }
2757
2758 IEMSELDESC DescSS;
2759 if (IEM_IS_V86_MODE(pVCpu))
2760 {
2761 pVCpu->iem.s.uCpl = 3;
2762 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2763 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2764 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2765 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2766 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2767 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2768
2769 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2770 DescSS.Legacy.u = 0;
2771 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2772 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2773 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2774 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2775 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2776 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2777 DescSS.Legacy.Gen.u2Dpl = 3;
2778 }
2779 else
2780 {
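 /* In protected mode the CPL of the new task is taken from the RPL of the incoming CS selector. */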
2781 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2782
2783 /*
2784 * Load the stack segment for the new task.
2785 */
2786 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2787 {
2788 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2789 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2790 }
2791
2792 /* Fetch the descriptor. */
2793 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2794 if (rcStrict != VINF_SUCCESS)
2795 {
2796 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2797 VBOXSTRICTRC_VAL(rcStrict)));
2798 return rcStrict;
2799 }
2800
2801 /* SS must be a data segment and writable. */
2802 if ( !DescSS.Legacy.Gen.u1DescType
2803 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2804 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2805 {
2806 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2807 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2808 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2809 }
2810
2811 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2812 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2813 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2814 {
2815 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2816 uNewCpl));
2817 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2818 }
2819
2820 /* Is it there? */
2821 if (!DescSS.Legacy.Gen.u1Present)
2822 {
2823 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2824 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2825 }
2826
2827 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2828 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2829
2830 /* Set the accessed bit before committing the result into SS. */
2831 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2832 {
2833 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2834 if (rcStrict != VINF_SUCCESS)
2835 return rcStrict;
2836 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2837 }
2838
2839 /* Commit SS. */
2840 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2841 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2842 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2843 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2844 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2845 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2847
2848 /* CPL has changed, update IEM before loading rest of segments. */
2849 pVCpu->iem.s.uCpl = uNewCpl;
2850
2851 /*
2852 * Load the data segments for the new task.
2853 */
2854 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2855 if (rcStrict != VINF_SUCCESS)
2856 return rcStrict;
2857 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2858 if (rcStrict != VINF_SUCCESS)
2859 return rcStrict;
2860 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2861 if (rcStrict != VINF_SUCCESS)
2862 return rcStrict;
2863 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2864 if (rcStrict != VINF_SUCCESS)
2865 return rcStrict;
2866
2867 /*
2868 * Load the code segment for the new task.
2869 */
2870 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2871 {
2872 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2873 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2874 }
2875
2876 /* Fetch the descriptor. */
2877 IEMSELDESC DescCS;
2878 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2879 if (rcStrict != VINF_SUCCESS)
2880 {
2881 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2882 return rcStrict;
2883 }
2884
2885 /* CS must be a code segment. */
2886 if ( !DescCS.Legacy.Gen.u1DescType
2887 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2888 {
2889 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2890 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2891 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2892 }
2893
2894 /* For conforming CS, DPL must be less than or equal to the RPL. */
2895 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2896 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2897 {
2898 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2899 DescCS.Legacy.Gen.u2Dpl));
2900 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2901 }
2902
2903 /* For non-conforming CS, DPL must match RPL. */
2904 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2905 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2906 {
2907 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2908 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2909 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2910 }
2911
2912 /* Is it there? */
2913 if (!DescCS.Legacy.Gen.u1Present)
2914 {
2915 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2916 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2917 }
2918
2919 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2920 u64Base = X86DESC_BASE(&DescCS.Legacy);
2921
2922 /* Set the accessed bit before committing the result into CS. */
2923 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2924 {
2925 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2926 if (rcStrict != VINF_SUCCESS)
2927 return rcStrict;
2928 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2929 }
2930
2931 /* Commit CS. */
2932 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2933 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2934 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2935 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2936 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2937 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2938 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2939 }
2940
2941 /* Make sure the CPU mode is correct. */
2942 IEMMODE const enmNewCpuMode = iemCalcCpuMode(pVCpu);
2943 if (enmNewCpuMode != pVCpu->iem.s.enmCpuMode)
2944 Log(("iemTaskSwitch: cpu mode %d -> %d\n", pVCpu->iem.s.enmCpuMode, enmNewCpuMode));
2945 pVCpu->iem.s.enmCpuMode = enmNewCpuMode;
2946
2947 /** @todo Debug trap. */
2948 if (fIsNewTSS386 && fNewDebugTrap)
2949 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2950
2951 /*
2952 * Construct the error code masks based on what caused this task switch.
2953 * See Intel Instruction reference for INT.
2954 */
2955 uint16_t uExt;
2956 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2957 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2958 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2959 uExt = 1;
2960 else
2961 uExt = 0;
2962
2963 /*
2964 * Push any error code on to the new stack.
2965 */
2966 if (fFlags & IEM_XCPT_FLAGS_ERR)
2967 {
2968 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2969 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
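 /* The error code is pushed as a dword on a 32-bit TSS task's stack and as a word on a 16-bit one. */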
2970 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2971
2972 /* Check that there is sufficient space on the stack. */
2973 /** @todo Factor out segment limit checking for normal/expand down segments
2974 * into a separate function. */
2975 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2976 {
2977 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2978 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2979 {
2980 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2981 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2982 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2983 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2984 }
2985 }
2986 else
2987 {
2988 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2989 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2990 {
2991 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2992 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2993 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2994 }
2995 }
2996
2997
2998 if (fIsNewTSS386)
2999 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3000 else
3001 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3002 if (rcStrict != VINF_SUCCESS)
3003 {
3004 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3005 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3006 return rcStrict;
3007 }
3008 }
3009
3010 /* Check the new EIP against the new CS limit. */
3011 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3012 {
3013 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3014 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3015 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3016 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3017 }
3018
3019 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3020 pVCpu->cpum.GstCtx.ss.Sel));
3021 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3022}
3023
3024
3025/**
3026 * Implements exceptions and interrupts for protected mode.
3027 *
3028 * @returns VBox strict status code.
3029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3030 * @param cbInstr The number of bytes to offset rIP by in the return
3031 * address.
3032 * @param u8Vector The interrupt / exception vector number.
3033 * @param fFlags The flags.
3034 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3035 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3036 */
3037static VBOXSTRICTRC
3038iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3039 uint8_t cbInstr,
3040 uint8_t u8Vector,
3041 uint32_t fFlags,
3042 uint16_t uErr,
3043 uint64_t uCr2) RT_NOEXCEPT
3044{
3045 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3046
3047 /*
3048 * Read the IDT entry.
3049 */
3050 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3051 {
3052 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3053 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3054 }
3055 X86DESC Idte;
3056 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3057 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3058 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3059 {
3060 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3061 return rcStrict;
3062 }
3063 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3064 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3065 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3066
3067 /*
3068 * Check the descriptor type, DPL and such.
3069 * ASSUMES this is done in the same order as described for call-gate calls.
3070 */
3071 if (Idte.Gate.u1DescType)
3072 {
3073 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3074 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3075 }
3076 bool fTaskGate = false;
3077 uint8_t f32BitGate = true;
3078 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3079 switch (Idte.Gate.u4Type)
3080 {
3081 case X86_SEL_TYPE_SYS_UNDEFINED:
3082 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3083 case X86_SEL_TYPE_SYS_LDT:
3084 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3085 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3086 case X86_SEL_TYPE_SYS_UNDEFINED2:
3087 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3088 case X86_SEL_TYPE_SYS_UNDEFINED3:
3089 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3090 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3091 case X86_SEL_TYPE_SYS_UNDEFINED4:
3092 {
3093 /** @todo check what actually happens when the type is wrong...
3094 * esp. call gates. */
3095 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3096 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3097 }
3098
3099 case X86_SEL_TYPE_SYS_286_INT_GATE:
3100 f32BitGate = false;
3101 RT_FALL_THRU();
3102 case X86_SEL_TYPE_SYS_386_INT_GATE:
3103 fEflToClear |= X86_EFL_IF;
3104 break;
3105
3106 case X86_SEL_TYPE_SYS_TASK_GATE:
3107 fTaskGate = true;
3108#ifndef IEM_IMPLEMENTS_TASKSWITCH
3109 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3110#endif
3111 break;
3112
3113 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3114 f32BitGate = false;
     RT_FALL_THRU();
3115 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3116 break;
3117
3118 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3119 }
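    /* Summary of the gate handling above: interrupt gates additionally clear
       EFLAGS.IF on delivery while trap gates leave IF untouched; 286 gates
       imply a 16-bit stack frame and a 16-bit entry offset; task gates are
       dispatched via a full task switch further down. */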
3120
3121 /* Check DPL against CPL if applicable. */
3122 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3123 {
3124 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3125 {
3126 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3127 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3128 }
3129 }
3130
3131 /* Is it there? */
3132 if (!Idte.Gate.u1Present)
3133 {
3134 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3135 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3136 }
3137
3138 /* Is it a task-gate? */
3139 if (fTaskGate)
3140 {
3141 /*
3142 * Construct the error code masks based on what caused this task switch.
3143 * See Intel Instruction reference for INT.
3144 */
3145 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3146 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3147 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3148 RTSEL SelTSS = Idte.Gate.u16Sel;
3149
3150 /*
3151 * Fetch the TSS descriptor in the GDT.
3152 */
3153 IEMSELDESC DescTSS;
3154 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3155 if (rcStrict != VINF_SUCCESS)
3156 {
3157 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3158 VBOXSTRICTRC_VAL(rcStrict)));
3159 return rcStrict;
3160 }
3161
3162 /* The TSS descriptor must be a system segment and be available (not busy). */
3163 if ( DescTSS.Legacy.Gen.u1DescType
3164 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3165 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3166 {
3167 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3168 u8Vector, SelTSS, DescTSS.Legacy.au64));
3169 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3170 }
3171
3172 /* The TSS must be present. */
3173 if (!DescTSS.Legacy.Gen.u1Present)
3174 {
3175 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3176 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3177 }
3178
3179 /* Do the actual task switch. */
3180 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3181 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3182 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3183 }
3184
3185 /* A null CS is bad. */
3186 RTSEL NewCS = Idte.Gate.u16Sel;
3187 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3188 {
3189 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3190 return iemRaiseGeneralProtectionFault0(pVCpu);
3191 }
3192
3193 /* Fetch the descriptor for the new CS. */
3194 IEMSELDESC DescCS;
3195 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3196 if (rcStrict != VINF_SUCCESS)
3197 {
3198 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3199 return rcStrict;
3200 }
3201
3202 /* Must be a code segment. */
3203 if (!DescCS.Legacy.Gen.u1DescType)
3204 {
3205 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3206 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3207 }
3208 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3209 {
3210 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3211 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3212 }
3213
3214 /* Don't allow lowering the privilege level. */
3215 /** @todo Does the lowering of privileges apply to software interrupts
3216 * only? This has a bearing on the more-privileged or
3217 * same-privilege stack behavior further down. A testcase would
3218 * be nice. */
3219 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3220 {
3221 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3222 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3223 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3224 }
3225
3226 /* Make sure the selector is present. */
3227 if (!DescCS.Legacy.Gen.u1Present)
3228 {
3229 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3230 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3231 }
3232
3233 /* Check the new EIP against the new CS limit. */
3234 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3235 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3236 ? Idte.Gate.u16OffsetLow
3237 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3238 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3239 if (uNewEip > cbLimitCS)
3240 {
3241 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3242 u8Vector, uNewEip, cbLimitCS, NewCS));
3243 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3244 }
3245 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3246
3247 /* Calc the flag image to push. */
3248 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3249 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3250 fEfl &= ~X86_EFL_RF;
3251 else
3252 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3253
3254 /* From V8086 mode only go to CPL 0. */
3255 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3256 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3257 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3258 {
3259 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3260 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3261 }
3262
3263 /*
3264 * If the privilege level changes, we need to get a new stack from the TSS.
3265 * This in turn means validating the new SS and ESP...
3266 */
3267 if (uNewCpl != pVCpu->iem.s.uCpl)
3268 {
3269 RTSEL NewSS;
3270 uint32_t uNewEsp;
3271 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3272 if (rcStrict != VINF_SUCCESS)
3273 return rcStrict;
3274
3275 IEMSELDESC DescSS;
3276 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3277 if (rcStrict != VINF_SUCCESS)
3278 return rcStrict;
3279 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3280 if (!DescSS.Legacy.Gen.u1DefBig)
3281 {
3282 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3283 uNewEsp = (uint16_t)uNewEsp;
3284 }
3285
3286 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3287
3288 /* Check that there is sufficient space for the stack frame. */
3289 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3290 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3291 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3292 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
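            /* Frame sizes resulting from the expression above:
                 - 32-bit gate, no V86: EIP, CS, EFLAGS, ESP, SS       -> 20 bytes (24 with error code)
                 - 16-bit gate, no V86: IP,  CS, FLAGS,  SP,  SS       -> 10 bytes (12 with error code)
                 - 32-bit gate, V86:    the above plus ES, DS, FS, GS  -> 36 bytes (40 with error code)
                 - 16-bit gate, V86:    the above plus ES, DS, FS, GS  -> 18 bytes (20 with error code) */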
3293
3294 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3295 {
3296 if ( uNewEsp - 1 > cbLimitSS
3297 || uNewEsp < cbStackFrame)
3298 {
3299 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3300 u8Vector, NewSS, uNewEsp, cbStackFrame));
3301 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3302 }
3303 }
3304 else
3305 {
3306 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3307 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3308 {
3309 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3310 u8Vector, NewSS, uNewEsp, cbStackFrame));
3311 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3312 }
3313 }
3314
3315 /*
3316 * Start making changes.
3317 */
3318
3319 /* Set the new CPL so that stack accesses use it. */
3320 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3321 pVCpu->iem.s.uCpl = uNewCpl;
3322
3323 /* Create the stack frame. */
3324 RTPTRUNION uStackFrame;
3325 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3326 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3327 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3328 if (rcStrict != VINF_SUCCESS)
3329 return rcStrict;
3330 void * const pvStackFrame = uStackFrame.pv;
3331 if (f32BitGate)
3332 {
3333 if (fFlags & IEM_XCPT_FLAGS_ERR)
3334 *uStackFrame.pu32++ = uErr;
3335 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3336 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3337 uStackFrame.pu32[2] = fEfl;
3338 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3339 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3340 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3341 if (fEfl & X86_EFL_VM)
3342 {
3343 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3344 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3345 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3346 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3347 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3348 }
3349 }
3350 else
3351 {
3352 if (fFlags & IEM_XCPT_FLAGS_ERR)
3353 *uStackFrame.pu16++ = uErr;
3354 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3355 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3356 uStackFrame.pu16[2] = fEfl;
3357 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3358 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3359 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3360 if (fEfl & X86_EFL_VM)
3361 {
3362 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3363 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3364 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3365 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3366 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3367 }
3368 }
3369 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3370 if (rcStrict != VINF_SUCCESS)
3371 return rcStrict;
3372
3373 /* Mark the selectors 'accessed' (hope this is the correct time). */
3374 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3375 * after pushing the stack frame? (Write protect the gdt + stack to
3376 * find out.) */
3377 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3378 {
3379 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3380 if (rcStrict != VINF_SUCCESS)
3381 return rcStrict;
3382 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3383 }
3384
3385 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3386 {
3387 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3388 if (rcStrict != VINF_SUCCESS)
3389 return rcStrict;
3390 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3391 }
3392
3393 /*
3394 * Start committing the register changes (joins with the DPL=CPL branch).
3395 */
3396 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3397 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3398 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3399 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3400 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3401 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3402 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3403 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3404 * SP is loaded).
3405 * Need to check the other combinations too:
3406 * - 16-bit TSS, 32-bit handler
3407 * - 32-bit TSS, 16-bit handler */
3408 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3409 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3410 else
3411 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3412
3413 if (fEfl & X86_EFL_VM)
3414 {
3415 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3416 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3417 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3418 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3419 }
3420 }
3421 /*
3422 * Same privilege, no stack change and smaller stack frame.
3423 */
3424 else
3425 {
3426 uint64_t uNewRsp;
3427 RTPTRUNION uStackFrame;
3428 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
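            /* Frame sizes here: 16-bit gate: IP, CS, FLAGS -> 6 bytes (8 with
               error code); 32-bit gate: EIP, CS, EFLAGS -> 12 bytes (16 with
               error code).  No SS:ESP pair is pushed since the stack doesn't change. */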
3429 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3430 if (rcStrict != VINF_SUCCESS)
3431 return rcStrict;
3432 void * const pvStackFrame = uStackFrame.pv;
3433
3434 if (f32BitGate)
3435 {
3436 if (fFlags & IEM_XCPT_FLAGS_ERR)
3437 *uStackFrame.pu32++ = uErr;
3438 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3439 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3440 uStackFrame.pu32[2] = fEfl;
3441 }
3442 else
3443 {
3444 if (fFlags & IEM_XCPT_FLAGS_ERR)
3445 *uStackFrame.pu16++ = uErr;
3446 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3447 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3448 uStackFrame.pu16[2] = fEfl;
3449 }
3450 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3451 if (rcStrict != VINF_SUCCESS)
3452 return rcStrict;
3453
3454 /* Mark the CS selector as 'accessed'. */
3455 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3456 {
3457 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3458 if (rcStrict != VINF_SUCCESS)
3459 return rcStrict;
3460 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3461 }
3462
3463 /*
3464 * Start committing the register changes (joins with the other branch).
3465 */
3466 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3467 }
3468
3469 /* ... register committing continues. */
3470 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3471 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3472 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3473 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3474 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3475 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3476
3477 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3478 fEfl &= ~fEflToClear;
3479 IEMMISC_SET_EFL(pVCpu, fEfl);
3480
3481 if (fFlags & IEM_XCPT_FLAGS_CR2)
3482 pVCpu->cpum.GstCtx.cr2 = uCr2;
3483
3484 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3485 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3486
3487 /* Make sure the CPU mode is correct. */
3488 IEMMODE const enmNewCpuMode = iemCalcCpuMode(pVCpu);
3489 if (enmNewCpuMode != pVCpu->iem.s.enmCpuMode)
3490 Log(("iemRaiseXcptOrIntInProtMode: cpu mode %d -> %d\n", pVCpu->iem.s.enmCpuMode, enmNewCpuMode));
3491 pVCpu->iem.s.enmCpuMode = enmNewCpuMode;
3492
3493 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3494}
3495
3496
3497/**
3498 * Implements exceptions and interrupts for long mode.
3499 *
3500 * @returns VBox strict status code.
3501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3502 * @param cbInstr The number of bytes to offset rIP by in the return
3503 * address.
3504 * @param u8Vector The interrupt / exception vector number.
3505 * @param fFlags The flags.
3506 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3507 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3508 */
3509static VBOXSTRICTRC
3510iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3511 uint8_t cbInstr,
3512 uint8_t u8Vector,
3513 uint32_t fFlags,
3514 uint16_t uErr,
3515 uint64_t uCr2) RT_NOEXCEPT
3516{
3517 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3518
3519 /*
3520 * Read the IDT entry.
3521 */
3522 uint16_t offIdt = (uint16_t)u8Vector << 4;
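    /* In long mode each IDT entry is 16 bytes (two 8-byte halves fetched
       below), hence the shift by 4 rather than the 8-byte stride used in
       protected mode. */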
3523 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3524 {
3525 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3526 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3527 }
3528 X86DESC64 Idte;
3529#ifdef _MSC_VER /* Shut up silly compiler warning. */
3530 Idte.au64[0] = 0;
3531 Idte.au64[1] = 0;
3532#endif
3533 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3534 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3535 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3536 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3537 {
3538 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3539 return rcStrict;
3540 }
3541 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3542 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3543 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3544
3545 /*
3546 * Check the descriptor type, DPL and such.
3547 * ASSUMES this is done in the same order as described for call-gate calls.
3548 */
3549 if (Idte.Gate.u1DescType)
3550 {
3551 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3552 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3553 }
3554 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3555 switch (Idte.Gate.u4Type)
3556 {
3557 case AMD64_SEL_TYPE_SYS_INT_GATE:
3558 fEflToClear |= X86_EFL_IF;
3559 break;
3560 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3561 break;
3562
3563 default:
3564 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3565 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3566 }
3567
3568 /* Check DPL against CPL if applicable. */
3569 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3570 {
3571 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3572 {
3573 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3574 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3575 }
3576 }
3577
3578 /* Is it there? */
3579 if (!Idte.Gate.u1Present)
3580 {
3581 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3582 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3583 }
3584
3585 /* A null CS is bad. */
3586 RTSEL NewCS = Idte.Gate.u16Sel;
3587 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3588 {
3589 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3590 return iemRaiseGeneralProtectionFault0(pVCpu);
3591 }
3592
3593 /* Fetch the descriptor for the new CS. */
3594 IEMSELDESC DescCS;
3595 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3596 if (rcStrict != VINF_SUCCESS)
3597 {
3598 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3599 return rcStrict;
3600 }
3601
3602 /* Must be a 64-bit code segment. */
3603 if (!DescCS.Long.Gen.u1DescType)
3604 {
3605 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3606 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3607 }
3608 if ( !DescCS.Long.Gen.u1Long
3609 || DescCS.Long.Gen.u1DefBig
3610 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3611 {
3612 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3613 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3614 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3615 }
3616
3617 /* Don't allow lowering the privilege level. For non-conforming CS
3618 selectors, the CS.DPL sets the privilege level the trap/interrupt
3619 handler runs at. For conforming CS selectors, the CPL remains
3620 unchanged, but the CS.DPL must be <= CPL. */
3621 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3622 * when CPU in Ring-0. Result \#GP? */
3623 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3624 {
3625 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3626 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3627 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3628 }
3629
3630
3631 /* Make sure the selector is present. */
3632 if (!DescCS.Legacy.Gen.u1Present)
3633 {
3634 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3635 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3636 }
3637
3638 /* Check that the new RIP is canonical. */
3639 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3640 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3641 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3642 if (!IEM_IS_CANONICAL(uNewRip))
3643 {
3644 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3645 return iemRaiseGeneralProtectionFault0(pVCpu);
3646 }
3647
3648 /*
3649 * If the privilege level changes or if the IST isn't zero, we need to get
3650 * a new stack from the TSS.
3651 */
3652 uint64_t uNewRsp;
3653 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3654 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3655 if ( uNewCpl != pVCpu->iem.s.uCpl
3656 || Idte.Gate.u3IST != 0)
3657 {
3658 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3659 if (rcStrict != VINF_SUCCESS)
3660 return rcStrict;
3661 }
3662 else
3663 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3664 uNewRsp &= ~(uint64_t)0xf;
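    /* In 64-bit mode the CPU aligns the stack pointer down to a 16-byte
       boundary before pushing the interrupt frame; the masking above mirrors
       that behaviour. */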
3665
3666 /*
3667 * Calc the flag image to push.
3668 */
3669 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3670 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3671 fEfl &= ~X86_EFL_RF;
3672 else
3673 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3674
3675 /*
3676 * Start making changes.
3677 */
3678 /* Set the new CPL so that stack accesses use it. */
3679 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3680 pVCpu->iem.s.uCpl = uNewCpl;
3681
3682 /* Create the stack frame. */
3683 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
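    /* That is 5 qwords -- RIP, CS, RFLAGS, RSP, SS -- i.e. 40 bytes, or 48
       bytes when an error code has to be pushed as well. */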
3684 RTPTRUNION uStackFrame;
3685 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3686 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3687 if (rcStrict != VINF_SUCCESS)
3688 return rcStrict;
3689 void * const pvStackFrame = uStackFrame.pv;
3690
3691 if (fFlags & IEM_XCPT_FLAGS_ERR)
3692 *uStackFrame.pu64++ = uErr;
3693 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3694 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3695 uStackFrame.pu64[2] = fEfl;
3696 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3697 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3698 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3699 if (rcStrict != VINF_SUCCESS)
3700 return rcStrict;
3701
3702 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3703 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3704 * after pushing the stack frame? (Write protect the gdt + stack to
3705 * find out.) */
3706 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3707 {
3708 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3709 if (rcStrict != VINF_SUCCESS)
3710 return rcStrict;
3711 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3712 }
3713
3714 /*
3715 * Start committing the register changes.
3716 */
3717 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3718 * hidden registers when interrupting 32-bit or 16-bit code! */
3719 if (uNewCpl != uOldCpl)
3720 {
3721 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3722 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3723 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3724 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3725 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3726 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3727 }
3728 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3729 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3730 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3731 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3732 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3733 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3734 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3735 pVCpu->cpum.GstCtx.rip = uNewRip;
3736 pVCpu->iem.s.enmCpuMode = IEMMODE_64BIT;
3737
3738 fEfl &= ~fEflToClear;
3739 IEMMISC_SET_EFL(pVCpu, fEfl);
3740
3741 if (fFlags & IEM_XCPT_FLAGS_CR2)
3742 pVCpu->cpum.GstCtx.cr2 = uCr2;
3743
3744 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3745 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3746
3747 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3748}
3749
3750
3751/**
3752 * Implements exceptions and interrupts.
3753 *
3754 * All exceptions and interrupts go through this function!
3755 *
3756 * @returns VBox strict status code.
3757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3758 * @param cbInstr The number of bytes to offset rIP by in the return
3759 * address.
3760 * @param u8Vector The interrupt / exception vector number.
3761 * @param fFlags The flags.
3762 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3763 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3764 */
3765VBOXSTRICTRC
3766iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3767 uint8_t cbInstr,
3768 uint8_t u8Vector,
3769 uint32_t fFlags,
3770 uint16_t uErr,
3771 uint64_t uCr2) RT_NOEXCEPT
3772{
3773 /*
3774 * Get all the state that we might need here.
3775 */
3776 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3777 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3778
3779#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3780 /*
3781 * Flush prefetch buffer
3782 */
3783 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3784#endif
3785
3786 /*
3787 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3788 */
3789 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3790 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3791 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3792 | IEM_XCPT_FLAGS_BP_INSTR
3793 | IEM_XCPT_FLAGS_ICEBP_INSTR
3794 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3795 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3796 {
3797 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3798 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3799 u8Vector = X86_XCPT_GP;
3800 uErr = 0;
3801 }
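    /* INT n is an IOPL-sensitive instruction in V8086 mode: with IOPL below 3
       it must raise #GP(0) rather than vector through the IDT (leaving the
       CR4.VME redirection case aside), which is why the request is rewritten
       above before any recursion accounting is done. */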
3802#ifdef DBGFTRACE_ENABLED
3803 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3804 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3805 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3806#endif
3807
3808 /*
3809 * Evaluate whether NMI blocking should be in effect.
3810 * Normally, NMI blocking is in effect whenever we inject an NMI.
3811 */
3812 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3813 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3814
3815#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3816 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3817 {
3818 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3819 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3820 return rcStrict0;
3821
3822 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3823 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3824 {
3825 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3826 fBlockNmi = false;
3827 }
3828 }
3829#endif
3830
3831#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3832 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3833 {
3834 /*
3835 * If the event is being injected as part of VMRUN, it isn't subject to event
3836 * intercepts in the nested-guest. However, secondary exceptions that occur
3837 * during injection of any event -are- subject to exception intercepts.
3838 *
3839 * See AMD spec. 15.20 "Event Injection".
3840 */
3841 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3842 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3843 else
3844 {
3845 /*
3846 * Check and handle if the event being raised is intercepted.
3847 */
3848 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3849 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3850 return rcStrict0;
3851 }
3852 }
3853#endif
3854
3855 /*
3856 * Set NMI blocking if necessary.
3857 */
3858 if (fBlockNmi)
3859 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3860
3861 /*
3862 * Do recursion accounting.
3863 */
3864 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3865 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3866 if (pVCpu->iem.s.cXcptRecursions == 0)
3867 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3868 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3869 else
3870 {
3871 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3872 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3873 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3874
3875 if (pVCpu->iem.s.cXcptRecursions >= 4)
3876 {
3877#ifdef DEBUG_bird
3878 AssertFailed();
3879#endif
3880 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3881 }
3882
3883 /*
3884 * Evaluate the sequence of recurring events.
3885 */
3886 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3887 NULL /* pXcptRaiseInfo */);
3888 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3889 { /* likely */ }
3890 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3891 {
3892 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3893 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3894 u8Vector = X86_XCPT_DF;
3895 uErr = 0;
3896#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3897 /* VMX nested-guest #DF intercept needs to be checked here. */
3898 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3899 {
3900 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3901 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3902 return rcStrict0;
3903 }
3904#endif
3905 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3906 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3907 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3908 }
3909 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3910 {
3911 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3912 return iemInitiateCpuShutdown(pVCpu);
3913 }
3914 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3915 {
3916 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3917 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3918 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3919 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3920 return VERR_EM_GUEST_CPU_HANG;
3921 }
3922 else
3923 {
3924 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3925 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3926 return VERR_IEM_IPE_9;
3927 }
3928
3929 /*
3930 * The 'EXT' bit is set when an exception occurs during delivery of an external
3931 * event (such as an interrupt or an earlier exception)[1]. The privileged
3932 * software exception (INT1) also sets the EXT bit[2]. For exceptions generated
3933 * by software interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3934 *
3935 * [1] - Intel spec. 6.13 "Error Code"
3936 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3937 * [3] - Intel Instruction reference for INT n.
3938 */
3939 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3940 && (fFlags & IEM_XCPT_FLAGS_ERR)
3941 && u8Vector != X86_XCPT_PF
3942 && u8Vector != X86_XCPT_DF)
3943 {
3944 uErr |= X86_TRAP_ERR_EXTERNAL;
3945 }
3946 }
3947
3948 pVCpu->iem.s.cXcptRecursions++;
3949 pVCpu->iem.s.uCurXcpt = u8Vector;
3950 pVCpu->iem.s.fCurXcpt = fFlags;
3951 pVCpu->iem.s.uCurXcptErr = uErr;
3952 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3953
3954 /*
3955 * Extensive logging.
3956 */
3957#if defined(LOG_ENABLED) && defined(IN_RING3)
3958 if (LogIs3Enabled())
3959 {
3960 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3961 PVM pVM = pVCpu->CTX_SUFF(pVM);
3962 char szRegs[4096];
3963 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3964 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3965 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3966 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3967 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3968 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3969 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3970 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3971 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3972 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3973 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3974 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3975 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3976 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3977 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3978 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3979 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3980 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3981 " efer=%016VR{efer}\n"
3982 " pat=%016VR{pat}\n"
3983 " sf_mask=%016VR{sf_mask}\n"
3984 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3985 " lstar=%016VR{lstar}\n"
3986 " star=%016VR{star} cstar=%016VR{cstar}\n"
3987 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3988 );
3989
3990 char szInstr[256];
3991 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3992 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3993 szInstr, sizeof(szInstr), NULL);
3994 Log3(("%s%s\n", szRegs, szInstr));
3995 }
3996#endif /* LOG_ENABLED */
3997
3998 /*
3999 * Stats.
4000 */
4001 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4002 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4003 else if (u8Vector <= X86_XCPT_LAST)
4004 {
4005 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4006 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4007 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4008 }
4009
4010 /*
4011 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4012 * to ensure that a stale TLB or paging cache entry will only cause one
4013 * spurious #PF.
4014 */
4015 if ( u8Vector == X86_XCPT_PF
4016 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4017 IEMTlbInvalidatePage(pVCpu, uCr2);
4018
4019 /*
4020 * Call the mode specific worker function.
4021 */
4022 VBOXSTRICTRC rcStrict;
4023 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4024 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4025 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4026 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4027 else
4028 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4029
4030 /* Flush the prefetch buffer. */
4031#ifdef IEM_WITH_CODE_TLB
4032 pVCpu->iem.s.pbInstrBuf = NULL;
4033#else
4034 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4035#endif
4036
4037 /*
4038 * Unwind.
4039 */
4040 pVCpu->iem.s.cXcptRecursions--;
4041 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4042 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4043 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4044 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
4045 pVCpu->iem.s.cXcptRecursions + 1));
4046 return rcStrict;
4047}
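/* A hedged usage sketch (hypothetical caller, for illustration only): an
   emulated INT imm8 instruction would typically funnel into this function
   along these lines, with cbInstr covering the whole instruction so that the
   pushed return address points at the instruction following the INT:

       return iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector,
                                IEM_XCPT_FLAGS_T_SOFT_INT, 0, 0);

   The real INT/INT3/INTO implementations live elsewhere in IEM and pass
   additional flags where needed (e.g. IEM_XCPT_FLAGS_BP_INSTR for INT3). */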
4048
4049#ifdef IEM_WITH_SETJMP
4050/**
4051 * See iemRaiseXcptOrInt. Will not return.
4052 */
4053DECL_NO_RETURN(void)
4054iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4055 uint8_t cbInstr,
4056 uint8_t u8Vector,
4057 uint32_t fFlags,
4058 uint16_t uErr,
4059 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4060{
4061 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4062 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4063}
4064#endif
4065
4066
4067/** \#DE - 00. */
4068VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4069{
4070 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4071}
4072
4073
4074/** \#DB - 01.
4075 * @note This automatically clears DR7.GD. */
4076VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4077{
4078 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4079 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4080 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4081}
4082
4083
4084/** \#BR - 05. */
4085VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4086{
4087 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4088}
4089
4090
4091/** \#UD - 06. */
4092VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4093{
4094 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4095}
4096
4097
4098/** \#NM - 07. */
4099VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4100{
4101 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4102}
4103
4104
4105/** \#TS(err) - 0a. */
4106VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4107{
4108 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4109}
4110
4111
4112/** \#TS(tr) - 0a. */
4113VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4114{
4115 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4116 pVCpu->cpum.GstCtx.tr.Sel, 0);
4117}
4118
4119
4120/** \#TS(0) - 0a. */
4121VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4122{
4123 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4124 0, 0);
4125}
4126
4127
4128/** \#TS(err) - 0a. */
4129VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4130{
4131 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4132 uSel & X86_SEL_MASK_OFF_RPL, 0);
4133}
4134
4135
4136/** \#NP(err) - 0b. */
4137VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4138{
4139 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4140}
4141
4142
4143/** \#NP(sel) - 0b. */
4144VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4145{
4146 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4147 uSel & ~X86_SEL_RPL, 0);
4148}
4149
4150
4151/** \#SS(seg) - 0c. */
4152VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4153{
4154 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4155 uSel & ~X86_SEL_RPL, 0);
4156}
4157
4158
4159/** \#SS(err) - 0c. */
4160VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4161{
4162 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4163}
4164
4165
4166/** \#GP(n) - 0d. */
4167VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4168{
4169 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4170}
4171
4172
4173/** \#GP(0) - 0d. */
4174VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4175{
4176 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4177}
4178
4179#ifdef IEM_WITH_SETJMP
4180/** \#GP(0) - 0d. */
4181DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4182{
4183 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4184}
4185#endif
4186
4187
4188/** \#GP(sel) - 0d. */
4189VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4190{
4191 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4192 Sel & ~X86_SEL_RPL, 0);
4193}
4194
4195
4196/** \#GP(0) - 0d. */
4197VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4198{
4199 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4200}
4201
4202
4203/** \#GP(sel) - 0d. */
4204VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4205{
4206 NOREF(iSegReg); NOREF(fAccess);
4207 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4208 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4209}
4210
4211#ifdef IEM_WITH_SETJMP
4212/** \#GP(sel) - 0d, longjmp. */
4213DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4214{
4215 NOREF(iSegReg); NOREF(fAccess);
4216 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4217 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4218}
4219#endif
4220
4221/** \#GP(sel) - 0d. */
4222VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4223{
4224 NOREF(Sel);
4225 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4226}
4227
4228#ifdef IEM_WITH_SETJMP
4229/** \#GP(sel) - 0d, longjmp. */
4230DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4231{
4232 NOREF(Sel);
4233 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4234}
4235#endif
4236
4237
4238/** \#GP(sel) - 0d. */
4239VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4240{
4241 NOREF(iSegReg); NOREF(fAccess);
4242 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4243}
4244
4245#ifdef IEM_WITH_SETJMP
4246/** \#GP(sel) - 0d, longjmp. */
4247DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4248{
4249 NOREF(iSegReg); NOREF(fAccess);
4250 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4251}
4252#endif
4253
4254
4255/** \#PF(n) - 0e. */
4256VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4257{
4258 uint16_t uErr;
4259 switch (rc)
4260 {
4261 case VERR_PAGE_NOT_PRESENT:
4262 case VERR_PAGE_TABLE_NOT_PRESENT:
4263 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4264 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4265 uErr = 0;
4266 break;
4267
4268 default:
4269 AssertMsgFailed(("%Rrc\n", rc));
4270 RT_FALL_THRU();
4271 case VERR_ACCESS_DENIED:
4272 uErr = X86_TRAP_PF_P;
4273 break;
4274
4275 /** @todo reserved */
4276 }
4277
4278 if (pVCpu->iem.s.uCpl == 3)
4279 uErr |= X86_TRAP_PF_US;
4280
4281 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4282 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4283 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4284 uErr |= X86_TRAP_PF_ID;
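    /* Recap of the #PF error code bits assembled here: bit 0 = P (protection
       violation vs. not-present), bit 1 = W/R (write access), bit 2 = U/S
       (CPL 3 access), bit 4 = I/D (instruction fetch, only reported with
       PAE+NXE here).  Bit 3 (RSVD) is not set by this path, see the reserved
       @todo above. */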
4285
4286#if 0 /* This is so much non-sense, really. Why was it done like that? */
4287 /* Note! RW access callers reporting a WRITE protection fault, will clear
4288 the READ flag before calling. So, read-modify-write accesses (RW)
4289 can safely be reported as READ faults. */
4290 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4291 uErr |= X86_TRAP_PF_RW;
4292#else
4293 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4294 {
4295 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4296 /// (regardless of outcome of the comparison in the latter case).
4297 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4298 uErr |= X86_TRAP_PF_RW;
4299 }
4300#endif
4301
4302 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4303 of the memory operand rather than at the start of it. (Not sure what
4304 happens if it crosses a page boundary.) The current heuristic for
4305 this is to report the #PF for the last byte if the access is more than
4306 64 bytes. This is probably not correct, but we can work that out later;
4307 the main objective for now is to get FXSAVE to behave like real hardware and
4308 make bs3-cpu-basic2 work. */
4309 if (cbAccess <= 64)
4310 { /* likely */ }
4311 else
4312 GCPtrWhere += cbAccess - 1;
4313
4314 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4315 uErr, GCPtrWhere);
4316}
4317
4318#ifdef IEM_WITH_SETJMP
4319/** \#PF(n) - 0e, longjmp. */
4320DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4321 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4322{
4323 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4324}
4325#endif
4326
4327
4328/** \#MF(0) - 10. */
4329VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4330{
4331 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4332 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4333
4334 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4335 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4336 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4337}
4338
4339
4340/** \#AC(0) - 11. */
4341VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4342{
4343 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4344}
4345
4346#ifdef IEM_WITH_SETJMP
4347/** \#AC(0) - 11, longjmp. */
4348DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4349{
4350 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4351}
4352#endif
4353
4354
4355/** \#XF(0)/\#XM(0) - 19. */
4356VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4357{
4358 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4359}
4360
4361
4362/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4363IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4364{
4365 NOREF(cbInstr);
4366 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4367}
4368
4369
4370/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4371IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4372{
4373 NOREF(cbInstr);
4374 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4375}
4376
4377
4378/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4379IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4380{
4381 NOREF(cbInstr);
4382 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4383}
4384
4385
4386/** @} */
4387
4388/** @name Common opcode decoders.
4389 * @{
4390 */
4391//#include <iprt/mem.h>
4392
4393/**
4394 * Used to add extra details about a stub case.
4395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4396 */
4397void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4398{
4399#if defined(LOG_ENABLED) && defined(IN_RING3)
4400 PVM pVM = pVCpu->CTX_SUFF(pVM);
4401 char szRegs[4096];
4402 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4403 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4404 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4405 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4406 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4407 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4408 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4409 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4410 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4411 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4412 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4413 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4414 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4415 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4416 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4417 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4418 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4419 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4420 " efer=%016VR{efer}\n"
4421 " pat=%016VR{pat}\n"
4422 " sf_mask=%016VR{sf_mask}\n"
4423 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4424 " lstar=%016VR{lstar}\n"
4425 " star=%016VR{star} cstar=%016VR{cstar}\n"
4426 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4427 );
4428
4429 char szInstr[256];
4430 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4431 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4432 szInstr, sizeof(szInstr), NULL);
4433
4434 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4435#else
4436 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4437#endif
4438}
4439
4440/** @} */
4441
4442
4443
4444/** @name Register Access.
4445 * @{
4446 */
4447
4448/**
4449 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4450 *
4451 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4452 * segment limit.
4453 *
4454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4455 * @param cbInstr Instruction size.
4456 * @param offNextInstr The offset of the next instruction.
4457 * @param enmEffOpSize Effective operand size.
4458 */
4459VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4460 IEMMODE enmEffOpSize) RT_NOEXCEPT
4461{
4462 switch (enmEffOpSize)
4463 {
4464 case IEMMODE_16BIT:
4465 {
4466 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4467 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4468 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no CS limit checks in 64-bit mode */))
4469 pVCpu->cpum.GstCtx.rip = uNewIp;
4470 else
4471 return iemRaiseGeneralProtectionFault0(pVCpu);
4472 break;
4473 }
4474
4475 case IEMMODE_32BIT:
4476 {
4477 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4478 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4479
4480 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4481 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4482 pVCpu->cpum.GstCtx.rip = uNewEip;
4483 else
4484 return iemRaiseGeneralProtectionFault0(pVCpu);
4485 break;
4486 }
4487
4488 case IEMMODE_64BIT:
4489 {
4490 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4491
4492 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4493 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4494 pVCpu->cpum.GstCtx.rip = uNewRip;
4495 else
4496 return iemRaiseGeneralProtectionFault0(pVCpu);
4497 break;
4498 }
4499
4500 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4501 }
4502
4503#ifndef IEM_WITH_CODE_TLB
4504 /* Flush the prefetch buffer. */
4505 pVCpu->iem.s.cbOpcode = cbInstr;
4506#endif
4507
4508 /*
4509 * Clear RF and finish the instruction (maybe raise #DB).
4510 */
4511 return iemRegFinishClearingRF(pVCpu);
4512}
4513
4514
4515/**
4516 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4517 *
4518 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4519 * segment limit.
4520 *
4521 * @returns Strict VBox status code.
4522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4523 * @param cbInstr Instruction size.
4524 * @param offNextInstr The offset of the next instruction.
4525 */
4526VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4527{
4528 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4529
4530 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4531 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4532 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no limit checking in 64-bit mode */))
4533 pVCpu->cpum.GstCtx.rip = uNewIp;
4534 else
4535 return iemRaiseGeneralProtectionFault0(pVCpu);
4536
4537#ifndef IEM_WITH_CODE_TLB
4538 /* Flush the prefetch buffer. */
4539 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4540#endif
4541
4542 /*
4543 * Clear RF and finish the instruction (maybe raise #DB).
4544 */
4545 return iemRegFinishClearingRF(pVCpu);
4546}
4547
4548
4549/**
4550 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4551 *
4552 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4553 * segment limit.
4554 *
4555 * @returns Strict VBox status code.
4556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4557 * @param cbInstr Instruction size.
4558 * @param offNextInstr The offset of the next instruction.
4559 * @param enmEffOpSize Effective operand size.
4560 */
4561VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4562 IEMMODE enmEffOpSize) RT_NOEXCEPT
4563{
4564 if (enmEffOpSize == IEMMODE_32BIT)
4565 {
4566 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4567
4568 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4569 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4570 pVCpu->cpum.GstCtx.rip = uNewEip;
4571 else
4572 return iemRaiseGeneralProtectionFault0(pVCpu);
4573 }
4574 else
4575 {
4576 Assert(enmEffOpSize == IEMMODE_64BIT);
4577
4578 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4579 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4580 pVCpu->cpum.GstCtx.rip = uNewRip;
4581 else
4582 return iemRaiseGeneralProtectionFault0(pVCpu);
4583 }
4584
4585#ifndef IEM_WITH_CODE_TLB
4586 /* Flush the prefetch buffer. */
4587 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4588#endif
4589
4590 /*
4591 * Clear RF and finish the instruction (maybe raise #DB).
4592 */
4593 return iemRegFinishClearingRF(pVCpu);
4594}
4595
4596
4597/**
4598 * Performs a near jump to the specified address.
4599 *
4600 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4601 *
4602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4603 * @param uNewIp The new IP value.
4604 */
4605VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4606{
4607 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4608 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no limit checks in 64-bit mode */))
4609 pVCpu->cpum.GstCtx.rip = uNewIp;
4610 else
4611 return iemRaiseGeneralProtectionFault0(pVCpu);
4612 /** @todo Test 16-bit jump in 64-bit mode. */
4613
4614#ifndef IEM_WITH_CODE_TLB
4615 /* Flush the prefetch buffer. */
4616 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4617#endif
4618
4619 /*
4620 * Clear RF and finish the instruction (maybe raise #DB).
4621 */
4622 return iemRegFinishClearingRF(pVCpu);
4623}
4624
4625
4626/**
4627 * Performs a near jump to the specified address.
4628 *
4629 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4630 *
4631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4632 * @param uNewEip The new EIP value.
4633 */
4634VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4635{
4636 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4637 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4638
4639 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4640 pVCpu->cpum.GstCtx.rip = uNewEip;
4641 else
4642 return iemRaiseGeneralProtectionFault0(pVCpu);
4643
4644#ifndef IEM_WITH_CODE_TLB
4645 /* Flush the prefetch buffer. */
4646 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4647#endif
4648
4649 /*
4650 * Clear RF and finish the instruction (maybe raise #DB).
4651 */
4652 return iemRegFinishClearingRF(pVCpu);
4653}
4654
4655
4656/**
4657 * Performs a near jump to the specified address.
4658 *
4659 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4660 * segment limit.
4661 *
4662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4663 * @param uNewRip The new RIP value.
4664 */
4665VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4666{
4667 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4668
4669 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4670 pVCpu->cpum.GstCtx.rip = uNewRip;
4671 else
4672 return iemRaiseGeneralProtectionFault0(pVCpu);
4673
4674#ifndef IEM_WITH_CODE_TLB
4675 /* Flush the prefetch buffer. */
4676 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4677#endif
4678
4679 /*
4680 * Clear RF and finish the instruction (maybe raise #DB).
4681 */
4682 return iemRegFinishClearingRF(pVCpu);
4683}
4684
4685/** @} */
4686
4687
4688/** @name FPU access and helpers.
4689 *
4690 * @{
4691 */
4692
4693/**
4694 * Updates the x87.DS and FPUDP registers.
4695 *
4696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4697 * @param pFpuCtx The FPU context.
4698 * @param iEffSeg The effective segment register.
4699 * @param GCPtrEff The effective address relative to @a iEffSeg.
4700 */
4701DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4702{
4703 RTSEL sel;
4704 switch (iEffSeg)
4705 {
4706 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4707 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4708 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4709 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4710 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4711 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4712 default:
4713 AssertMsgFailed(("%d\n", iEffSeg));
4714 sel = pVCpu->cpum.GstCtx.ds.Sel;
4715 }
4716 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4717 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4718 {
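        /* Real and V86 mode: no selector is recorded, FPUDP holds the linear
           address (selector * 16 + offset). */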
4719 pFpuCtx->DS = 0;
4720 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4721 }
4722 else if (!IEM_IS_LONG_MODE(pVCpu))
4723 {
4724 pFpuCtx->DS = sel;
4725 pFpuCtx->FPUDP = GCPtrEff;
4726 }
4727 else
4728 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4729}
4730
4731
4732/**
4733 * Rotates the stack registers in the push direction.
4734 *
4735 * @param pFpuCtx The FPU context.
4736 * @remarks This is a complete waste of time, but fxsave stores the registers in
4737 * stack order.
4738 */
4739DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4740{
4741 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4742 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4743 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4744 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4745 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4746 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4747 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4748 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4749 pFpuCtx->aRegs[0].r80 = r80Tmp;
4750}
4751
4752
4753/**
4754 * Rotates the stack registers in the pop direction.
4755 *
4756 * @param pFpuCtx The FPU context.
4757 * @remarks This is a complete waste of time, but fxsave stores the registers in
4758 * stack order.
4759 */
4760DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4761{
4762 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4763 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4764 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4765 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4766 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4767 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4768 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4769 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4770 pFpuCtx->aRegs[7].r80 = r80Tmp;
4771}
4772
4773
4774/**
4775 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4776 * exception prevents it.
4777 *
4778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4779 * @param pResult The FPU operation result to push.
4780 * @param pFpuCtx The FPU context.
4781 */
4782static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4783{
4784 /* Update FSW and bail if there are pending exceptions afterwards. */
4785 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4786 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4787 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4788 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4789 {
4790 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4791 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4792 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4793 pFpuCtx->FSW = fFsw;
4794 return;
4795 }
4796
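    /* (TOP + 7) & 7 decrements TOP by one, i.e. the slot the push will use. */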
4797 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4798 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4799 {
4800 /* All is fine, push the actual value. */
4801 pFpuCtx->FTW |= RT_BIT(iNewTop);
4802 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4803 }
4804 else if (pFpuCtx->FCW & X86_FCW_IM)
4805 {
4806 /* Masked stack overflow, push QNaN. */
4807 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4808 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4809 }
4810 else
4811 {
4812 /* Raise stack overflow, don't push anything. */
4813 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4814 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4815 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4816 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4817 return;
4818 }
4819
4820 fFsw &= ~X86_FSW_TOP_MASK;
4821 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4822 pFpuCtx->FSW = fFsw;
4823
4824 iemFpuRotateStackPush(pFpuCtx);
4825 RT_NOREF(pVCpu);
4826}
4827
4828
4829/**
4830 * Stores a result in a FPU register and updates the FSW and FTW.
4831 *
4832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4833 * @param pFpuCtx The FPU context.
4834 * @param pResult The result to store.
4835 * @param iStReg Which FPU register to store it in.
4836 */
4837static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4838{
4839 Assert(iStReg < 8);
4840 uint16_t fNewFsw = pFpuCtx->FSW;
4841 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4842 fNewFsw &= ~X86_FSW_C_MASK;
4843 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4844 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4845 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4846 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4847 pFpuCtx->FSW = fNewFsw;
4848 pFpuCtx->FTW |= RT_BIT(iReg);
4849 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4850 RT_NOREF(pVCpu);
4851}
4852
4853
4854/**
4855 * Only updates the FPU status word (FSW) with the result of the current
4856 * instruction.
4857 *
4858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4859 * @param pFpuCtx The FPU context.
4860 * @param u16FSW The FSW output of the current instruction.
4861 */
4862static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4863{
4864 uint16_t fNewFsw = pFpuCtx->FSW;
4865 fNewFsw &= ~X86_FSW_C_MASK;
4866 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4867 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4868 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4869 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4870 pFpuCtx->FSW = fNewFsw;
4871 RT_NOREF(pVCpu);
4872}
4873
4874
4875/**
4876 * Pops one item off the FPU stack if no pending exception prevents it.
4877 *
4878 * @param pFpuCtx The FPU context.
4879 */
4880static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4881{
4882 /* Check pending exceptions. */
4883 uint16_t uFSW = pFpuCtx->FSW;
4884 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4885 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4886 return;
4887
4888 /* TOP--. */
4889 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4890 uFSW &= ~X86_FSW_TOP_MASK;
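    /* Adding 9 to the TOP field is +1 modulo 8, i.e. the pop direction. */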
4891 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4892 pFpuCtx->FSW = uFSW;
4893
4894 /* Mark the previous ST0 as empty. */
4895 iOldTop >>= X86_FSW_TOP_SHIFT;
4896 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4897
4898 /* Rotate the registers. */
4899 iemFpuRotateStackPop(pFpuCtx);
4900}
4901
4902
4903/**
4904 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4905 *
4906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4907 * @param pResult The FPU operation result to push.
4908 */
4909void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4910{
4911 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4912 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4913 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4914}
4915
4916
4917/**
4918 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4919 * and sets FPUDP and FPUDS.
4920 *
4921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4922 * @param pResult The FPU operation result to push.
4923 * @param iEffSeg The effective segment register.
4924 * @param GCPtrEff The effective address relative to @a iEffSeg.
4925 */
4926void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4927{
4928 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4929 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4930 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4931 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4932}
4933
4934
4935/**
4936 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4937 * unless a pending exception prevents it.
4938 *
4939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4940 * @param pResult The FPU operation result to store and push.
4941 */
4942void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4943{
4944 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4945 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4946
4947 /* Update FSW and bail if there are pending exceptions afterwards. */
4948 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4949 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4950 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4951 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4952 {
4953 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4954 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4955 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4956 pFpuCtx->FSW = fFsw;
4957 return;
4958 }
4959
4960 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4961 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4962 {
4963 /* All is fine, push the actual value. */
4964 pFpuCtx->FTW |= RT_BIT(iNewTop);
4965 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4966 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4967 }
4968 else if (pFpuCtx->FCW & X86_FCW_IM)
4969 {
4970 /* Masked stack overflow, push QNaN. */
4971 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4972 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4973 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4974 }
4975 else
4976 {
4977 /* Raise stack overflow, don't push anything. */
4978 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4979 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4980 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4981 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4982 return;
4983 }
4984
4985 fFsw &= ~X86_FSW_TOP_MASK;
4986 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4987 pFpuCtx->FSW = fFsw;
4988
4989 iemFpuRotateStackPush(pFpuCtx);
4990}
4991
4992
4993/**
4994 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4995 * FOP.
4996 *
4997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4998 * @param pResult The result to store.
4999 * @param iStReg Which FPU register to store it in.
5000 */
5001void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5002{
5003 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5004 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5005 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5006}
5007
5008
5009/**
5010 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5011 * FOP, and then pops the stack.
5012 *
5013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5014 * @param pResult The result to store.
5015 * @param iStReg Which FPU register to store it in.
5016 */
5017void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5018{
5019 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5020 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5021 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5022 iemFpuMaybePopOne(pFpuCtx);
5023}
5024
5025
5026/**
5027 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5028 * FPUDP, and FPUDS.
5029 *
5030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5031 * @param pResult The result to store.
5032 * @param iStReg Which FPU register to store it in.
5033 * @param iEffSeg The effective memory operand selector register.
5034 * @param GCPtrEff The effective memory operand offset.
5035 */
5036void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5037 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5038{
5039 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5040 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5041 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5042 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5043}
5044
5045
5046/**
5047 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5048 * FPUDP, and FPUDS, and then pops the stack.
5049 *
5050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5051 * @param pResult The result to store.
5052 * @param iStReg Which FPU register to store it in.
5053 * @param iEffSeg The effective memory operand selector register.
5054 * @param GCPtrEff The effective memory operand offset.
5055 */
5056void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5057 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5058{
5059 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5060 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5061 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5062 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5063 iemFpuMaybePopOne(pFpuCtx);
5064}
5065
5066
5067/**
5068 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5069 *
5070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5071 */
5072void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
5073{
5074 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5075 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5076}
5077
5078
5079/**
5080 * Updates the FSW, FOP, FPUIP, and FPUCS.
5081 *
5082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5083 * @param u16FSW The FSW from the current instruction.
5084 */
5085void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5086{
5087 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5088 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5089 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5090}
5091
5092
5093/**
5094 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5095 *
5096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5097 * @param u16FSW The FSW from the current instruction.
5098 */
5099void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5100{
5101 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5102 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5103 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5104 iemFpuMaybePopOne(pFpuCtx);
5105}
5106
5107
5108/**
5109 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5110 *
5111 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5112 * @param u16FSW The FSW from the current instruction.
5113 * @param iEffSeg The effective memory operand selector register.
5114 * @param GCPtrEff The effective memory operand offset.
5115 */
5116void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5117{
5118 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5119 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5120 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5121 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5122}
5123
5124
5125/**
5126 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5127 *
5128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5129 * @param u16FSW The FSW from the current instruction.
5130 */
5131void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5132{
5133 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5134 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5135 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5136 iemFpuMaybePopOne(pFpuCtx);
5137 iemFpuMaybePopOne(pFpuCtx);
5138}
5139
5140
5141/**
5142 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5143 *
5144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5145 * @param u16FSW The FSW from the current instruction.
5146 * @param iEffSeg The effective memory operand selector register.
5147 * @param GCPtrEff The effective memory operand offset.
5148 */
5149void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5150{
5151 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5152 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5153 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5154 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5155 iemFpuMaybePopOne(pFpuCtx);
5156}
5157
5158
5159/**
5160 * Worker routine for raising an FPU stack underflow exception.
5161 *
5162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5163 * @param pFpuCtx The FPU context.
5164 * @param iStReg The stack register being accessed.
5165 */
5166static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5167{
5168 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5169 if (pFpuCtx->FCW & X86_FCW_IM)
5170 {
5171 /* Masked underflow. */
5172 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5173 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5174 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5175 if (iStReg != UINT8_MAX)
5176 {
5177 pFpuCtx->FTW |= RT_BIT(iReg);
5178 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5179 }
5180 }
5181 else
5182 {
5183 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5184 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5185 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5186 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5187 }
5188 RT_NOREF(pVCpu);
5189}
5190
5191
5192/**
5193 * Raises a FPU stack underflow exception.
5194 *
5195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5196 * @param iStReg The destination register that should be loaded
5197 * with QNaN if \#IS is masked. Specify
5198 * UINT8_MAX if none (like for fcom).
5199 */
5200void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5201{
5202 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5203 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5204 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5205}
5206
5207
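/**
 * Raises an FPU stack underflow exception, also recording the memory operand
 * in FPUDP and FPUDS.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iStReg      The destination register that should be loaded with QNaN
 *                      if \#IS is masked. UINT8_MAX if none.
 * @param   iEffSeg     The effective memory operand selector register.
 * @param   GCPtrEff    The effective memory operand offset.
 */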
5208void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5209{
5210 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5211 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5212 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5213 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5214}
5215
5216
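/**
 * Raises an FPU stack underflow exception and pops the stack afterwards.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iStReg      The destination register that should be loaded with QNaN
 *                      if \#IS is masked. UINT8_MAX if none.
 */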
5217void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5218{
5219 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5220 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5221 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5222 iemFpuMaybePopOne(pFpuCtx);
5223}
5224
5225
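/**
 * Raises an FPU stack underflow exception, records the memory operand in
 * FPUDP and FPUDS, and pops the stack afterwards.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iStReg      The destination register that should be loaded with QNaN
 *                      if \#IS is masked. UINT8_MAX if none.
 * @param   iEffSeg     The effective memory operand selector register.
 * @param   GCPtrEff    The effective memory operand offset.
 */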
5226void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5227{
5228 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5229 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5230 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5231 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5232 iemFpuMaybePopOne(pFpuCtx);
5233}
5234
5235
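/**
 * Raises an FPU stack underflow exception and pops the stack twice afterwards.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 */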
5236void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5237{
5238 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5239 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5240 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5241 iemFpuMaybePopOne(pFpuCtx);
5242 iemFpuMaybePopOne(pFpuCtx);
5243}
5244
5245
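/**
 * Raises an FPU stack underflow exception for a push operation, loading a QNaN
 * into the new ST0 if the exception is masked.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 */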
5246void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5247{
5248 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5249 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5250
5251 if (pFpuCtx->FCW & X86_FCW_IM)
5252 {
5253 /* Masked underflow - Push QNaN. */
5254 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5255 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5256 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5257 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5258 pFpuCtx->FTW |= RT_BIT(iNewTop);
5259 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5260 iemFpuRotateStackPush(pFpuCtx);
5261 }
5262 else
5263 {
5264 /* Exception pending - don't change TOP or the register stack. */
5265 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5266 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5267 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5268 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5269 }
5270}
5271
5272
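/**
 * Raises an FPU stack underflow exception for a push of two values, loading
 * QNaNs into the new ST0 and ST1 if the exception is masked.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 */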
5273void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5274{
5275 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5276 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5277
5278 if (pFpuCtx->FCW & X86_FCW_IM)
5279 {
5280 /* Masked underflow - Push QNaN. */
5281 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5282 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5283 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5284 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5285 pFpuCtx->FTW |= RT_BIT(iNewTop);
5286 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5287 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5288 iemFpuRotateStackPush(pFpuCtx);
5289 }
5290 else
5291 {
5292 /* Exception pending - don't change TOP or the register stack. */
5293 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5294 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5295 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5296 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5297 }
5298}
5299
5300
5301/**
5302 * Worker routine for raising an FPU stack overflow exception on a push.
5303 *
5304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5305 * @param pFpuCtx The FPU context.
5306 */
5307static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5308{
5309 if (pFpuCtx->FCW & X86_FCW_IM)
5310 {
5311 /* Masked overflow. */
5312 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5313 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5314 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5315 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5316 pFpuCtx->FTW |= RT_BIT(iNewTop);
5317 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5318 iemFpuRotateStackPush(pFpuCtx);
5319 }
5320 else
5321 {
5322 /* Exception pending - don't change TOP or the register stack. */
5323 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5324 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5325 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5326 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5327 }
5328 RT_NOREF(pVCpu);
5329}
5330
5331
5332/**
5333 * Raises a FPU stack overflow exception on a push.
5334 *
5335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5336 */
5337void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5338{
5339 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5340 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5341 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5342}
5343
5344
5345/**
5346 * Raises a FPU stack overflow exception on a push with a memory operand.
5347 *
5348 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5349 * @param iEffSeg The effective memory operand selector register.
5350 * @param GCPtrEff The effective memory operand offset.
5351 */
5352void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5353{
5354 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5355 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5356 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5357 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5358}
5359
5360/** @} */
5361
5362
5363/** @name SSE+AVX SIMD access and helpers.
5364 *
5365 * @{
5366 */
5367/**
5368 * Stores a result in a SIMD XMM register, updates the MXCSR.
5369 *
5370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5371 * @param pResult The result to store.
5372 * @param iXmmReg Which SIMD XMM register to store the result in.
5373 */
5374void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5375{
5376 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5377 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5378
5379 /* The result is only updated if there is no unmasked exception pending. */
5380 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5381 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5382 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5383}
5384
5385
5386/**
5387 * Updates the MXCSR.
5388 *
5389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5390 * @param fMxcsr The new MXCSR value.
5391 */
5392void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5393{
5394 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5395 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5396}
5397/** @} */
5398
5399
5400/** @name Memory access.
5401 *
5402 * @{
5403 */
5404
5405
5406/**
5407 * Updates the IEMCPU::cbWritten counter if applicable.
5408 *
5409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5410 * @param fAccess The access being accounted for.
5411 * @param cbMem The access size.
5412 */
5413DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5414{
5415 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5416 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5417 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5418}
5419
5420
5421/**
5422 * Applies the segment limit, base and attributes.
5423 *
5424 * This may raise a \#GP or \#SS.
5425 *
5426 * @returns VBox strict status code.
5427 *
5428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5429 * @param fAccess The kind of access which is being performed.
5430 * @param iSegReg The index of the segment register to apply.
5431 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5432 * TSS, ++).
5433 * @param cbMem The access size.
5434 * @param pGCPtrMem Pointer to the guest memory address to apply
5435 * segmentation to. Input and output parameter.
5436 */
5437VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5438{
5439 if (iSegReg == UINT8_MAX)
5440 return VINF_SUCCESS;
5441
5442 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5443 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5444 switch (pVCpu->iem.s.enmCpuMode)
5445 {
5446 case IEMMODE_16BIT:
5447 case IEMMODE_32BIT:
5448 {
5449 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5450 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5451
5452 if ( pSel->Attr.n.u1Present
5453 && !pSel->Attr.n.u1Unusable)
5454 {
5455 Assert(pSel->Attr.n.u1DescType);
5456 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5457 {
5458 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5459 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5460 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5461
5462 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5463 {
5464 /** @todo CPL check. */
5465 }
5466
5467 /*
5468 * There are two kinds of data selectors, normal and expand down.
5469 */
5470 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5471 {
5472 if ( GCPtrFirst32 > pSel->u32Limit
5473 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5474 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5475 }
5476 else
5477 {
5478 /*
5479 * The upper boundary is defined by the B bit, not the G bit!
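                         * Valid offsets are (limit, 0xffff] with B=0 and
                         * (limit, 0xffffffff] with B=1.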
5480 */
5481 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5482 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5483 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5484 }
5485 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5486 }
5487 else
5488 {
5489 /*
4490 * Code selectors can usually be used to read thru; writing is
4491 * only permitted in real and V8086 mode.
5492 */
5493 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5494 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5495 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5496 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5497 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5498
5499 if ( GCPtrFirst32 > pSel->u32Limit
5500 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5501 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5502
5503 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5504 {
5505 /** @todo CPL check. */
5506 }
5507
5508 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5509 }
5510 }
5511 else
5512 return iemRaiseGeneralProtectionFault0(pVCpu);
5513 return VINF_SUCCESS;
5514 }
5515
5516 case IEMMODE_64BIT:
5517 {
5518 RTGCPTR GCPtrMem = *pGCPtrMem;
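            /* Only FS and GS can have a non-zero segment base in 64-bit mode;
               the other segment bases are treated as zero. */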
5519 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5520 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5521
5522 Assert(cbMem >= 1);
5523 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5524 return VINF_SUCCESS;
5525 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5526 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5527 return iemRaiseGeneralProtectionFault0(pVCpu);
5528 }
5529
5530 default:
5531 AssertFailedReturn(VERR_IEM_IPE_7);
5532 }
5533}
5534
5535
5536/**
5537 * Translates a virtual address to a physical address and checks if we
5538 * can access the page as specified.
5539 *
5540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5541 * @param GCPtrMem The virtual address.
5542 * @param cbAccess The access size, for raising \#PF correctly for
5543 * FXSAVE and such.
5544 * @param fAccess The intended access.
5545 * @param pGCPhysMem Where to return the physical address.
5546 */
5547VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5548 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5549{
5550 /** @todo Need a different PGM interface here. We're currently using
5551 * generic / REM interfaces. This won't cut it for R0. */
5552 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5553 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5554 * here. */
5555 PGMPTWALK Walk;
5556 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5557 if (RT_FAILURE(rc))
5558 {
5559 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5560 /** @todo Check unassigned memory in unpaged mode. */
5561 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5562#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5563 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5564 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5565#endif
5566 *pGCPhysMem = NIL_RTGCPHYS;
5567 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5568 }
5569
5570 /* If the page is writable and does not have the no-exec bit set, all
5571 access is allowed. Otherwise we'll have to check more carefully... */
5572 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5573 {
5574 /* Write to read only memory? */
5575 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5576 && !(Walk.fEffective & X86_PTE_RW)
5577 && ( ( pVCpu->iem.s.uCpl == 3
5578 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5579 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5580 {
5581 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5582 *pGCPhysMem = NIL_RTGCPHYS;
5583#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5584 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5585 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5586#endif
5587 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5588 }
5589
5590 /* Kernel memory accessed by userland? */
5591 if ( !(Walk.fEffective & X86_PTE_US)
5592 && pVCpu->iem.s.uCpl == 3
5593 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5594 {
5595 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5596 *pGCPhysMem = NIL_RTGCPHYS;
5597#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5598 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5599 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5600#endif
5601 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5602 }
5603
5604 /* Executing non-executable memory? */
5605 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5606 && (Walk.fEffective & X86_PTE_PAE_NX)
5607 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5608 {
5609 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5610 *pGCPhysMem = NIL_RTGCPHYS;
5611#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5612 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5613 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5614#endif
5615 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5616 VERR_ACCESS_DENIED);
5617 }
5618 }
5619
5620 /*
5621 * Set the dirty / access flags.
5622 * ASSUMES this is set when the address is translated rather than on commit...
5623 */
5624 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5625 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5626 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5627 {
5628 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5629 AssertRC(rc2);
5630 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5631 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5632 }
5633
5634 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5635 *pGCPhysMem = GCPhys;
5636 return VINF_SUCCESS;
5637}
5638
5639
5640/**
5641 * Looks up a memory mapping entry.
5642 *
5643 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5645 * @param pvMem The memory address.
5646 * @param fAccess The access to match.
5647 */
5648DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5649{
5650 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5651 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5652 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5653 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5654 return 0;
5655 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5656 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5657 return 1;
5658 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5659 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5660 return 2;
5661 return VERR_NOT_FOUND;
5662}
5663
5664
5665/**
5666 * Finds a free memmap entry when using iNextMapping doesn't work.
5667 *
5668 * @returns Memory mapping index, 1024 on failure.
5669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5670 */
5671static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5672{
5673 /*
5674 * The easy case.
5675 */
5676 if (pVCpu->iem.s.cActiveMappings == 0)
5677 {
5678 pVCpu->iem.s.iNextMapping = 1;
5679 return 0;
5680 }
5681
5682 /* There should be enough mappings for all instructions. */
5683 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5684
5685 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5686 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5687 return i;
5688
5689 AssertFailedReturn(1024);
5690}
5691
5692
5693/**
5694 * Commits a bounce buffer that needs writing back and unmaps it.
5695 *
5696 * @returns Strict VBox status code.
5697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5698 * @param iMemMap The index of the buffer to commit.
5699 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5700 * Always false in ring-3, obviously.
5701 */
5702static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5703{
5704 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5705 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5706#ifdef IN_RING3
5707 Assert(!fPostponeFail);
5708 RT_NOREF_PV(fPostponeFail);
5709#endif
5710
5711 /*
5712 * Do the writing.
5713 */
5714 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5715 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5716 {
5717 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5718 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5719 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5720 if (!pVCpu->iem.s.fBypassHandlers)
5721 {
5722 /*
5723 * Carefully and efficiently dealing with access handler return
5724 * codes makes this a little bloated.
5725 */
5726 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5727 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5728 pbBuf,
5729 cbFirst,
5730 PGMACCESSORIGIN_IEM);
5731 if (rcStrict == VINF_SUCCESS)
5732 {
5733 if (cbSecond)
5734 {
5735 rcStrict = PGMPhysWrite(pVM,
5736 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5737 pbBuf + cbFirst,
5738 cbSecond,
5739 PGMACCESSORIGIN_IEM);
5740 if (rcStrict == VINF_SUCCESS)
5741 { /* nothing */ }
5742 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5743 {
5744 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5745 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5746 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5747 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5748 }
5749#ifndef IN_RING3
5750 else if (fPostponeFail)
5751 {
5752 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5753 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5754 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5755 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5756 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5757 return iemSetPassUpStatus(pVCpu, rcStrict);
5758 }
5759#endif
5760 else
5761 {
5762 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5763 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5764 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5765 return rcStrict;
5766 }
5767 }
5768 }
5769 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5770 {
5771 if (!cbSecond)
5772 {
5773 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5774 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5775 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5776 }
5777 else
5778 {
5779 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5780 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5781 pbBuf + cbFirst,
5782 cbSecond,
5783 PGMACCESSORIGIN_IEM);
5784 if (rcStrict2 == VINF_SUCCESS)
5785 {
5786 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5787 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5788 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5789 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5790 }
5791 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5792 {
5793 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5794 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5795 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5796 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5797 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5798 }
5799#ifndef IN_RING3
5800 else if (fPostponeFail)
5801 {
5802 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5803 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5804 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5805 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5806 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5807 return iemSetPassUpStatus(pVCpu, rcStrict);
5808 }
5809#endif
5810 else
5811 {
5812 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5813 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5814 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5815 return rcStrict2;
5816 }
5817 }
5818 }
5819#ifndef IN_RING3
5820 else if (fPostponeFail)
5821 {
5822 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5823 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5824 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5825 if (!cbSecond)
5826 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5827 else
5828 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5829 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5830 return iemSetPassUpStatus(pVCpu, rcStrict);
5831 }
5832#endif
5833 else
5834 {
5835 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5836 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5837 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5838 return rcStrict;
5839 }
5840 }
5841 else
5842 {
5843 /*
5844 * No access handlers, much simpler.
5845 */
5846 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5847 if (RT_SUCCESS(rc))
5848 {
5849 if (cbSecond)
5850 {
5851 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5852 if (RT_SUCCESS(rc))
5853 { /* likely */ }
5854 else
5855 {
5856 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5857 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5858 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5859 return rc;
5860 }
5861 }
5862 }
5863 else
5864 {
5865 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5866 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5867 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5868 return rc;
5869 }
5870 }
5871 }
5872
5873#if defined(IEM_LOG_MEMORY_WRITES)
5874 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5875 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5876 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5877 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5878 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5879 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5880
5881 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5882 g_cbIemWrote = cbWrote;
5883 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5884#endif
5885
5886 /*
5887 * Free the mapping entry.
5888 */
5889 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5890 Assert(pVCpu->iem.s.cActiveMappings != 0);
5891 pVCpu->iem.s.cActiveMappings--;
5892 return VINF_SUCCESS;
5893}
5894
5895
5896/**
5897 * iemMemMap worker that deals with a request crossing pages.
5898 */
5899static VBOXSTRICTRC
5900iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5901{
5902 Assert(cbMem <= GUEST_PAGE_SIZE);
5903
5904 /*
5905 * Do the address translations.
5906 */
5907 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5908 RTGCPHYS GCPhysFirst;
5909 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5910 if (rcStrict != VINF_SUCCESS)
5911 return rcStrict;
5912 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5913
5914 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5915 RTGCPHYS GCPhysSecond;
5916 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5917 cbSecondPage, fAccess, &GCPhysSecond);
5918 if (rcStrict != VINF_SUCCESS)
5919 return rcStrict;
5920 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5921 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5922
5923 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5924
5925 /*
5926 * Read in the current memory content if it's a read, execute or partial
5927 * write access.
5928 */
5929 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5930
5931 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5932 {
5933 if (!pVCpu->iem.s.fBypassHandlers)
5934 {
5935 /*
5936 * Must carefully deal with access handler status codes here,
5937 * which makes the code a bit bloated.
5938 */
5939 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5940 if (rcStrict == VINF_SUCCESS)
5941 {
5942 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5943 if (rcStrict == VINF_SUCCESS)
5944 { /*likely */ }
5945 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5946 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5947 else
5948 {
5949 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5950 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5951 return rcStrict;
5952 }
5953 }
5954 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5955 {
5956 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5957 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5958 {
5959 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5960 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5961 }
5962 else
5963 {
5964 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5965 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5966 return rcStrict2;
5967 }
5968 }
5969 else
5970 {
5971 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5972 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5973 return rcStrict;
5974 }
5975 }
5976 else
5977 {
5978 /*
5979 * No informational status codes here, much more straightforward.
5980 */
5981 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5982 if (RT_SUCCESS(rc))
5983 {
5984 Assert(rc == VINF_SUCCESS);
5985 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5986 if (RT_SUCCESS(rc))
5987 Assert(rc == VINF_SUCCESS);
5988 else
5989 {
5990 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5991 return rc;
5992 }
5993 }
5994 else
5995 {
5996 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5997 return rc;
5998 }
5999 }
6000 }
6001#ifdef VBOX_STRICT
6002 else
6003 memset(pbBuf, 0xcc, cbMem);
6004 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6005 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6006#endif
6007 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6008
6009 /*
6010 * Commit the bounce buffer entry.
6011 */
6012 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6013 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6014 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6015 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6016 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6017 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6018 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6019 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6020 pVCpu->iem.s.cActiveMappings++;
6021
6022 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6023 *ppvMem = pbBuf;
6024 return VINF_SUCCESS;
6025}
6026
6027
6028/**
6029 * iemMemMap worker that deals with iemMemPageMap failures.
6030 */
6031static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6032 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6033{
6034 /*
6035 * Filter out conditions we can handle and the ones which shouldn't happen.
6036 */
6037 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6038 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6039 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6040 {
6041 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6042 return rcMap;
6043 }
6044 pVCpu->iem.s.cPotentialExits++;
6045
6046 /*
6047 * Read in the current memory content if it's a read, execute or partial
6048 * write access.
6049 */
6050 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6051 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6052 {
6053 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6054 memset(pbBuf, 0xff, cbMem);
6055 else
6056 {
6057 int rc;
6058 if (!pVCpu->iem.s.fBypassHandlers)
6059 {
6060 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6061 if (rcStrict == VINF_SUCCESS)
6062 { /* nothing */ }
6063 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6064 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6065 else
6066 {
6067 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6068 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6069 return rcStrict;
6070 }
6071 }
6072 else
6073 {
6074 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6075 if (RT_SUCCESS(rc))
6076 { /* likely */ }
6077 else
6078 {
6079 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6080 GCPhysFirst, rc));
6081 return rc;
6082 }
6083 }
6084 }
6085 }
6086#ifdef VBOX_STRICT
6087 else
6088 memset(pbBuf, 0xcc, cbMem);
6089#endif
6090#ifdef VBOX_STRICT
6091 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6092 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6093#endif
6094
6095 /*
6096 * Commit the bounce buffer entry.
6097 */
6098 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6099 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6100 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6101 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6102 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6103 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6104 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6105 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6106 pVCpu->iem.s.cActiveMappings++;
6107
6108 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6109 *ppvMem = pbBuf;
6110 return VINF_SUCCESS;
6111}
6112
6113
6114
6115/**
6116 * Maps the specified guest memory for the given kind of access.
6117 *
6118 * This may be using bounce buffering of the memory if it's crossing a page
6119 * boundary or if there is an access handler installed for any of it. Because
6120 * of lock prefix guarantees, we're in for some extra clutter when this
6121 * happens.
6122 *
6123 * This may raise a \#GP, \#SS, \#PF or \#AC.
6124 *
6125 * @returns VBox strict status code.
6126 *
6127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6128 * @param ppvMem Where to return the pointer to the mapped memory.
6129 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6130 * 8, 12, 16, 32 or 512. When used by string operations
6131 * it can be up to a page.
6132 * @param iSegReg The index of the segment register to use for this
6133 * access. The base and limits are checked. Use UINT8_MAX
6134 * to indicate that no segmentation is required (for IDT,
6135 * GDT and LDT accesses).
6136 * @param GCPtrMem The address of the guest memory.
6137 * @param fAccess How the memory is being accessed. The
6138 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6139 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6140 * when raising exceptions.
6141 * @param uAlignCtl Alignment control:
6142 * - Bits 15:0 is the alignment mask.
6143 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6144 * IEM_MEMMAP_F_ALIGN_SSE, and
6145 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6146 * Pass zero to skip alignment.
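 *
 * A minimal usage sketch (illustrative only; it mirrors what the
 * iemMemFetchDataU16 helper further down does, assuming iSegReg and GCPtrMem
 * come from the decoded instruction):
 * @code
 *     uint16_t const *pu16Src;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
 *                                       IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);  // natural alignment mask
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint16_t const u16Value = *pu16Src;           // consume the data before unmapping
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 *     }
 * @endcode
 * SSE style 16 byte alignment would instead pass
 * 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE as uAlignCtl, as done by
 * iemMemFetchDataU128AlignedSse below.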
6147 */
6148VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6149 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6150{
6151 /*
6152 * Check the input and figure out which mapping entry to use.
6153 */
6154 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6155 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6156 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6157 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6158 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6159
6160 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6161 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6162 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6163 {
6164 iMemMap = iemMemMapFindFree(pVCpu);
6165 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6166 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6167 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6168 pVCpu->iem.s.aMemMappings[2].fAccess),
6169 VERR_IEM_IPE_9);
6170 }
6171
6172 /*
6173 * Map the memory, checking that we can actually access it. If something
6174 * slightly complicated happens, fall back on bounce buffering.
6175 */
6176 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6177 if (rcStrict == VINF_SUCCESS)
6178 { /* likely */ }
6179 else
6180 return rcStrict;
6181
6182 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6183 { /* likely */ }
6184 else
6185 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6186
6187 /*
6188 * Alignment check.
6189 */
6190 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6191 { /* likelyish */ }
6192 else
6193 {
6194 /* Misaligned access. */
6195 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6196 {
6197 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6198 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6199 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6200 {
6201 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6202
6203 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6204 return iemRaiseAlignmentCheckException(pVCpu);
6205 }
6206 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6207 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6208 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6209 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6210 * that's what FXSAVE does on a 10980xe. */
6211 && iemMemAreAlignmentChecksEnabled(pVCpu))
6212 return iemRaiseAlignmentCheckException(pVCpu);
6213 else
6214 return iemRaiseGeneralProtectionFault0(pVCpu);
6215 }
6216 }
6217
6218#ifdef IEM_WITH_DATA_TLB
6219 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6220
6221 /*
6222 * Get the TLB entry for this page.
6223 */
6224 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6225 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6226 if (pTlbe->uTag == uTag)
6227 {
6228# ifdef VBOX_WITH_STATISTICS
6229 pVCpu->iem.s.DataTlb.cTlbHits++;
6230# endif
6231 }
6232 else
6233 {
6234 pVCpu->iem.s.DataTlb.cTlbMisses++;
6235 PGMPTWALK Walk;
6236 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6237 if (RT_FAILURE(rc))
6238 {
6239 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6240# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6241 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6242 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6243# endif
6244 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6245 }
6246
6247 Assert(Walk.fSucceeded);
6248 pTlbe->uTag = uTag;
6249 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6250 pTlbe->GCPhys = Walk.GCPhys;
6251 pTlbe->pbMappingR3 = NULL;
6252 }
6253
6254 /*
6255 * Check TLB page table level access flags.
6256 */
6257 /* If the page is either supervisor only or non-writable, we need to do
6258 more careful access checks. */
6259 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6260 {
6261 /* Write to read only memory? */
6262 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6263 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6264 && ( ( pVCpu->iem.s.uCpl == 3
6265 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6266 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6267 {
6268 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6269# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6270 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6271 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6272# endif
6273 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6274 }
6275
6276 /* Kernel memory accessed by userland? */
6277 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6278 && pVCpu->iem.s.uCpl == 3
6279 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6280 {
6281 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6282# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6283 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6284 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6285# endif
6286 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6287 }
6288 }
6289
6290 /*
6291 * Set the dirty / access flags.
6292 * ASSUMES this is set when the address is translated rather than on commit...
6293 */
6294 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6295 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6296 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6297 {
6298 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6299 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6300 AssertRC(rc2);
6301 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6302 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6303 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6304 }
6305
6306 /*
6307 * Look up the physical page info if necessary.
6308 */
6309 uint8_t *pbMem = NULL;
6310 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6311# ifdef IN_RING3
6312 pbMem = pTlbe->pbMappingR3;
6313# else
6314 pbMem = NULL;
6315# endif
6316 else
6317 {
6318 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6319 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6320 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6321 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6322 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6323 { /* likely */ }
6324 else
6325 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6326 pTlbe->pbMappingR3 = NULL;
6327 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6328 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6329 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6330 &pbMem, &pTlbe->fFlagsAndPhysRev);
6331 AssertRCReturn(rc, rc);
6332# ifdef IN_RING3
6333 pTlbe->pbMappingR3 = pbMem;
6334# endif
6335 }
6336
6337 /*
6338 * Check the physical page level access and mapping.
6339 */
6340 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6341 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6342 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6343 { /* probably likely */ }
6344 else
6345 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6346 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6347 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6348 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6349 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6350 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6351
6352 if (pbMem)
6353 {
6354 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6355 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6356 fAccess |= IEM_ACCESS_NOT_LOCKED;
6357 }
6358 else
6359 {
6360 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6361 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6362 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6363 if (rcStrict != VINF_SUCCESS)
6364 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6365 }
6366
6367 void * const pvMem = pbMem;
6368
6369 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6370 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6371 if (fAccess & IEM_ACCESS_TYPE_READ)
6372 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6373
6374#else /* !IEM_WITH_DATA_TLB */
6375
6376 RTGCPHYS GCPhysFirst;
6377 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6378 if (rcStrict != VINF_SUCCESS)
6379 return rcStrict;
6380
6381 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6382 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6383 if (fAccess & IEM_ACCESS_TYPE_READ)
6384 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6385
6386 void *pvMem;
6387 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6388 if (rcStrict != VINF_SUCCESS)
6389 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6390
6391#endif /* !IEM_WITH_DATA_TLB */
6392
6393 /*
6394 * Fill in the mapping table entry.
6395 */
6396 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6397 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6398 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6399 pVCpu->iem.s.cActiveMappings += 1;
6400
6401 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6402 *ppvMem = pvMem;
6403
6404 return VINF_SUCCESS;
6405}
6406
6407
6408/**
6409 * Commits the guest memory if bounce buffered and unmaps it.
6410 *
6411 * @returns Strict VBox status code.
6412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6413 * @param pvMem The mapping.
6414 * @param fAccess The kind of access.
6415 */
6416VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6417{
6418 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6419 AssertReturn(iMemMap >= 0, iMemMap);
6420
6421 /* If it's bounce buffered, we may need to write back the buffer. */
6422 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6423 {
6424 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6425 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6426 }
6427 /* Otherwise unlock it. */
6428 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6429 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6430
6431 /* Free the entry. */
6432 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6433 Assert(pVCpu->iem.s.cActiveMappings != 0);
6434 pVCpu->iem.s.cActiveMappings--;
6435 return VINF_SUCCESS;
6436}
6437
6438#ifdef IEM_WITH_SETJMP
6439
6440/**
6441 * Maps the specified guest memory for the given kind of access, longjmp on
6442 * error.
6443 *
6444 * This may be using bounce buffering of the memory if it's crossing a page
6445 * boundary or if there is an access handler installed for any of it. Because
6446 * of lock prefix guarantees, we're in for some extra clutter when this
6447 * happens.
6448 *
6449 * This may raise a \#GP, \#SS, \#PF or \#AC.
6450 *
6451 * @returns Pointer to the mapped memory.
6452 *
6453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6454 * @param cbMem The number of bytes to map. This is usually 1,
6455 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6456 * string operations it can be up to a page.
6457 * @param iSegReg The index of the segment register to use for
6458 * this access. The base and limits are checked.
6459 * Use UINT8_MAX to indicate that no segmentation
6460 * is required (for IDT, GDT and LDT accesses).
6461 * @param GCPtrMem The address of the guest memory.
6462 * @param fAccess How the memory is being accessed. The
6463 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6464 * how to map the memory, while the
6465 * IEM_ACCESS_WHAT_XXX bit is used when raising
6466 * exceptions.
6467 * @param uAlignCtl Alignment control:
6468 * - Bits 15:0 is the alignment mask.
6469 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6470 * IEM_MEMMAP_F_ALIGN_SSE, and
6471 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6472 * Pass zero to skip alignment.
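 *
 * A minimal usage sketch (illustrative only; this is the pattern the
 * iemMemFetchDataU16Jmp helper further down uses): there is no status code to
 * check, errors longjmp out, and the mapping is released with
 * iemMemCommitAndUnmapJmp.
 * @code
 *     uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem,
 *                                                              IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 *     uint16_t const u16Value = *pu16Src;
 *     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 * @endcode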
6473 */
6474void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6475 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6476{
6477 /*
6478 * Check the input, check segment access and adjust address
6479 * with segment base.
6480 */
6481 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6482 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6483 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6484
6485 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6486 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6487 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6488
6489 /*
6490 * Alignment check.
6491 */
6492 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6493 { /* likelyish */ }
6494 else
6495 {
6496 /* Misaligned access. */
6497 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6498 {
6499 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6500 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6501 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6502 {
6503 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6504
6505 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6506 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6507 }
6508 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6509 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6510 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6511 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6512 * that's what FXSAVE does on a 10980xe. */
6513 && iemMemAreAlignmentChecksEnabled(pVCpu))
6514 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6515 else
6516 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6517 }
6518 }
6519
6520 /*
6521 * Figure out which mapping entry to use.
6522 */
6523 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6524 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6525 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6526 {
6527 iMemMap = iemMemMapFindFree(pVCpu);
6528 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6529 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6530 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6531 pVCpu->iem.s.aMemMappings[2].fAccess),
6532 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6533 }
6534
6535 /*
6536 * Crossing a page boundary?
6537 */
6538 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6539 { /* No (likely). */ }
6540 else
6541 {
6542 void *pvMem;
6543 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6544 if (rcStrict == VINF_SUCCESS)
6545 return pvMem;
6546 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6547 }
6548
6549#ifdef IEM_WITH_DATA_TLB
6550 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6551
6552 /*
6553 * Get the TLB entry for this page.
6554 */
6555 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6556 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6557 if (pTlbe->uTag == uTag)
6558 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6559 else
6560 {
6561 pVCpu->iem.s.DataTlb.cTlbMisses++;
6562 PGMPTWALK Walk;
6563 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6564 if (RT_FAILURE(rc))
6565 {
6566 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6567# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6568 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6569 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6570# endif
6571 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6572 }
6573
6574 Assert(Walk.fSucceeded);
6575 pTlbe->uTag = uTag;
6576 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6577 pTlbe->GCPhys = Walk.GCPhys;
6578 pTlbe->pbMappingR3 = NULL;
6579 }
6580
6581 /*
6582 * Check the flags and physical revision.
6583 */
6584 /** @todo make the caller pass these in with fAccess. */
6585 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6586 ? IEMTLBE_F_PT_NO_USER : 0;
6587 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6588 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6589 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6590 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6591 ? IEMTLBE_F_PT_NO_WRITE : 0)
6592 : 0;
6593 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6594 uint8_t *pbMem = NULL;
6595 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6596 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6597# ifdef IN_RING3
6598 pbMem = pTlbe->pbMappingR3;
6599# else
6600 pbMem = NULL;
6601# endif
6602 else
6603 {
6604 /*
6605 * Okay, something isn't quite right or needs refreshing.
6606 */
6607 /* Write to read only memory? */
6608 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6609 {
6610 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6611# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6612 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6613 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6614# endif
6615 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6616 }
6617
6618 /* Kernel memory accessed by userland? */
6619 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6620 {
6621 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6622# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6623 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6624 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6625# endif
6626 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6627 }
6628
6629 /* Set the dirty / access flags.
6630 ASSUMES this is set when the address is translated rather than on commit... */
6631 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6632 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6633 {
6634 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6635 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6636 AssertRC(rc2);
6637 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6638 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6639 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6640 }
6641
6642 /*
6643 * Check if the physical page info needs updating.
6644 */
6645 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6646# ifdef IN_RING3
6647 pbMem = pTlbe->pbMappingR3;
6648# else
6649 pbMem = NULL;
6650# endif
6651 else
6652 {
6653 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6654 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6655 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6656 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6657 pTlbe->pbMappingR3 = NULL;
6658 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6659 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6660 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6661 &pbMem, &pTlbe->fFlagsAndPhysRev);
6662 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6663# ifdef IN_RING3
6664 pTlbe->pbMappingR3 = pbMem;
6665# endif
6666 }
6667
6668 /*
6669 * Check the physical page level access and mapping.
6670 */
6671 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6672 { /* probably likely */ }
6673 else
6674 {
6675 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6676 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6677 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6678 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6679 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6680 if (rcStrict == VINF_SUCCESS)
6681 return pbMem;
6682 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6683 }
6684 }
6685 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6686
6687 if (pbMem)
6688 {
6689 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6690 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6691 fAccess |= IEM_ACCESS_NOT_LOCKED;
6692 }
6693 else
6694 {
6695 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6696 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6697 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6698 if (rcStrict == VINF_SUCCESS)
6699 return pbMem;
6700 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6701 }
6702
6703 void * const pvMem = pbMem;
6704
6705 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6706 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6707 if (fAccess & IEM_ACCESS_TYPE_READ)
6708 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6709
6710#else /* !IEM_WITH_DATA_TLB */
6711
6712
6713 RTGCPHYS GCPhysFirst;
6714 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6715 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6716 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6717
6718 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6719 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6720 if (fAccess & IEM_ACCESS_TYPE_READ)
6721 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6722
6723 void *pvMem;
6724 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6725 if (rcStrict == VINF_SUCCESS)
6726 { /* likely */ }
6727 else
6728 {
6729 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6730 if (rcStrict == VINF_SUCCESS)
6731 return pvMem;
6732 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6733 }
6734
6735#endif /* !IEM_WITH_DATA_TLB */
6736
6737 /*
6738 * Fill in the mapping table entry.
6739 */
6740 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6741 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6742 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6743 pVCpu->iem.s.cActiveMappings++;
6744
6745 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6746 return pvMem;
6747}
6748
6749
6750/**
6751 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6752 *
6753 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6754 * @param pvMem The mapping.
6755 * @param fAccess The kind of access.
6756 */
6757void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6758{
6759 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6760 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6761
6762 /* If it's bounce buffered, we may need to write back the buffer. */
6763 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6764 {
6765 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6766 {
6767 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6768 if (rcStrict == VINF_SUCCESS)
6769 return;
6770 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6771 }
6772 }
6773 /* Otherwise unlock it. */
6774 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6775 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6776
6777 /* Free the entry. */
6778 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6779 Assert(pVCpu->iem.s.cActiveMappings != 0);
6780 pVCpu->iem.s.cActiveMappings--;
6781}
6782
6783#endif /* IEM_WITH_SETJMP */
6784
6785#ifndef IN_RING3
6786/**
6787 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6788 * buffer part runs into trouble, the write-back is postponed to ring-3 (sets FF and stuff).
6789 *
6790 * Allows the instruction to be completed and retired, while the IEM user will
6791 * return to ring-3 immediately afterwards and do the postponed writes there.
6792 *
6793 * @returns VBox status code (no strict statuses). Caller must check
6794 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6796 * @param pvMem The mapping.
6797 * @param fAccess The kind of access.
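 *
 * Caller-side sketch (illustrative only; fContinue is just a placeholder, and
 * the usual VMCPU_FF_IS_SET macro is assumed to be available in this context):
 * @code
 *     rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 *     if (   rcStrict == VINF_SUCCESS
 *         && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         fContinue = true;   // nothing postponed, safe to iterate the string instruction
 *     else
 *         fContinue = false;  // finish the instruction and let the caller drop to ring-3
 * @endcode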
6798 */
6799VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6800{
6801 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6802 AssertReturn(iMemMap >= 0, iMemMap);
6803
6804 /* If it's bounce buffered, we may need to write back the buffer. */
6805 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6806 {
6807 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6808 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6809 }
6810 /* Otherwise unlock it. */
6811 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6812 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6813
6814 /* Free the entry. */
6815 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6816 Assert(pVCpu->iem.s.cActiveMappings != 0);
6817 pVCpu->iem.s.cActiveMappings--;
6818 return VINF_SUCCESS;
6819}
6820#endif
6821
6822
6823/**
6824 * Rolls back mappings, releasing page locks and such.
6825 *
6826 * The caller shall only call this after checking cActiveMappings.
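 *
 * Typical call site sketch (illustrative only):
 * @code
 *     if (pVCpu->iem.s.cActiveMappings > 0)
 *         iemMemRollback(pVCpu);
 * @endcode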
6827 *
6828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6829 */
6830void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6831{
6832 Assert(pVCpu->iem.s.cActiveMappings > 0);
6833
6834 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6835 while (iMemMap-- > 0)
6836 {
6837 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6838 if (fAccess != IEM_ACCESS_INVALID)
6839 {
6840 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6841 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6842 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6843 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6844 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6845 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6846 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6848 pVCpu->iem.s.cActiveMappings--;
6849 }
6850 }
6851}
6852
6853
6854/**
6855 * Fetches a data byte.
6856 *
6857 * @returns Strict VBox status code.
6858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6859 * @param pu8Dst Where to return the byte.
6860 * @param iSegReg The index of the segment register to use for
6861 * this access. The base and limits are checked.
6862 * @param GCPtrMem The address of the guest memory.
6863 */
6864VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6865{
6866 /* The lazy approach for now... */
6867 uint8_t const *pu8Src;
6868 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6869 if (rc == VINF_SUCCESS)
6870 {
6871 *pu8Dst = *pu8Src;
6872 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6873 }
6874 return rc;
6875}
6876
6877
6878#ifdef IEM_WITH_SETJMP
6879/**
6880 * Fetches a data byte, longjmp on error.
6881 *
6882 * @returns The byte.
6883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6884 * @param iSegReg The index of the segment register to use for
6885 * this access. The base and limits are checked.
6886 * @param GCPtrMem The address of the guest memory.
6887 */
6888uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6889{
6890 /* The lazy approach for now... */
6891 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6892 uint8_t const bRet = *pu8Src;
6893 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6894 return bRet;
6895}
6896#endif /* IEM_WITH_SETJMP */
6897
6898
6899/**
6900 * Fetches a data word.
6901 *
6902 * @returns Strict VBox status code.
6903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6904 * @param pu16Dst Where to return the word.
6905 * @param iSegReg The index of the segment register to use for
6906 * this access. The base and limits are checked.
6907 * @param GCPtrMem The address of the guest memory.
6908 */
6909VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6910{
6911 /* The lazy approach for now... */
6912 uint16_t const *pu16Src;
6913 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6914 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6915 if (rc == VINF_SUCCESS)
6916 {
6917 *pu16Dst = *pu16Src;
6918 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6919 }
6920 return rc;
6921}
6922
6923
6924#ifdef IEM_WITH_SETJMP
6925/**
6926 * Fetches a data word, longjmp on error.
6927 *
6928 * @returns The word
6929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6930 * @param iSegReg The index of the segment register to use for
6931 * this access. The base and limits are checked.
6932 * @param GCPtrMem The address of the guest memory.
6933 */
6934uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6935{
6936 /* The lazy approach for now... */
6937 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6938 sizeof(*pu16Src) - 1);
6939 uint16_t const u16Ret = *pu16Src;
6940 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6941 return u16Ret;
6942}
6943#endif
6944
6945
6946/**
6947 * Fetches a data dword.
6948 *
6949 * @returns Strict VBox status code.
6950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6951 * @param pu32Dst Where to return the dword.
6952 * @param iSegReg The index of the segment register to use for
6953 * this access. The base and limits are checked.
6954 * @param GCPtrMem The address of the guest memory.
6955 */
6956VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6957{
6958 /* The lazy approach for now... */
6959 uint32_t const *pu32Src;
6960 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6961 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6962 if (rc == VINF_SUCCESS)
6963 {
6964 *pu32Dst = *pu32Src;
6965 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6966 }
6967 return rc;
6968}
6969
6970
6971/**
6972 * Fetches a data dword and zero extends it to a qword.
6973 *
6974 * @returns Strict VBox status code.
6975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6976 * @param pu64Dst Where to return the qword.
6977 * @param iSegReg The index of the segment register to use for
6978 * this access. The base and limits are checked.
6979 * @param GCPtrMem The address of the guest memory.
6980 */
6981VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6982{
6983 /* The lazy approach for now... */
6984 uint32_t const *pu32Src;
6985 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6986 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6987 if (rc == VINF_SUCCESS)
6988 {
6989 *pu64Dst = *pu32Src;
6990 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6991 }
6992 return rc;
6993}
6994
6995
6996#ifdef IEM_WITH_SETJMP
6997
6998/**
6999 * Fetches a data dword, longjmp on error, fallback/safe version.
7000 *
7001 * @returns The dword
7002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7003 * @param iSegReg The index of the segment register to use for
7004 * this access. The base and limits are checked.
7005 * @param GCPtrMem The address of the guest memory.
7006 */
7007uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7008{
7009 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7010 sizeof(*pu32Src) - 1);
7011 uint32_t const u32Ret = *pu32Src;
7012 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7013 return u32Ret;
7014}
7015
7016
7017/**
7018 * Fetches a data dword, longjmp on error.
7019 *
7020 * @returns The dword
7021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7022 * @param iSegReg The index of the segment register to use for
7023 * this access. The base and limits are checked.
7024 * @param GCPtrMem The address of the guest memory.
7025 */
7026uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7027{
7028# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7029 /*
7030 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7031 */
7032 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
7033 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7034 {
7035 /*
7036 * TLB lookup.
7037 */
7038 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7039 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7040 if (pTlbe->uTag == uTag)
7041 {
7042 /*
7043 * Check TLB page table level access flags.
7044 */
7045 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
7046 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7047 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7048 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7049 {
7050 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7051
7052 /*
7053 * Alignment check:
7054 */
7055 /** @todo check priority \#AC vs \#PF */
7056 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7057 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7058 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7059 || pVCpu->iem.s.uCpl != 3)
7060 {
7061 /*
7062 * Fetch and return the dword
7063 */
7064 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7065 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7066 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7067 }
7068 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7069 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7070 }
7071 }
7072 }
7073
7074 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7075 outdated page pointer, or other troubles. */
7076 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7077 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7078
7079# else
7080 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7081 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7082 uint32_t const u32Ret = *pu32Src;
7083 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7084 return u32Ret;
7085# endif
7086}
7087#endif
7088
7089
7090#ifdef SOME_UNUSED_FUNCTION
7091/**
7092 * Fetches a data dword and sign extends it to a qword.
7093 *
7094 * @returns Strict VBox status code.
7095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7096 * @param pu64Dst Where to return the sign extended value.
7097 * @param iSegReg The index of the segment register to use for
7098 * this access. The base and limits are checked.
7099 * @param GCPtrMem The address of the guest memory.
7100 */
7101VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7102{
7103 /* The lazy approach for now... */
7104 int32_t const *pi32Src;
7105 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7106 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7107 if (rc == VINF_SUCCESS)
7108 {
7109 *pu64Dst = *pi32Src;
7110 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7111 }
7112#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7113 else
7114 *pu64Dst = 0;
7115#endif
7116 return rc;
7117}
7118#endif
7119
7120
7121/**
7122 * Fetches a data qword.
7123 *
7124 * @returns Strict VBox status code.
7125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7126 * @param pu64Dst Where to return the qword.
7127 * @param iSegReg The index of the segment register to use for
7128 * this access. The base and limits are checked.
7129 * @param GCPtrMem The address of the guest memory.
7130 */
7131VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7132{
7133 /* The lazy approach for now... */
7134 uint64_t const *pu64Src;
7135 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7136 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7137 if (rc == VINF_SUCCESS)
7138 {
7139 *pu64Dst = *pu64Src;
7140 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7141 }
7142 return rc;
7143}
7144
7145
7146#ifdef IEM_WITH_SETJMP
7147/**
7148 * Fetches a data qword, longjmp on error.
7149 *
7150 * @returns The qword.
7151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7152 * @param iSegReg The index of the segment register to use for
7153 * this access. The base and limits are checked.
7154 * @param GCPtrMem The address of the guest memory.
7155 */
7156uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7157{
7158 /* The lazy approach for now... */
7159 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7160 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7161 uint64_t const u64Ret = *pu64Src;
7162 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7163 return u64Ret;
7164}
7165#endif
7166
7167
7168/**
7169 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7170 *
7171 * @returns Strict VBox status code.
7172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7173 * @param pu64Dst Where to return the qword.
7174 * @param iSegReg The index of the segment register to use for
7175 * this access. The base and limits are checked.
7176 * @param GCPtrMem The address of the guest memory.
7177 */
7178VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7179{
7180 /* The lazy approach for now... */
7181 uint64_t const *pu64Src;
7182 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7183 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7184 if (rc == VINF_SUCCESS)
7185 {
7186 *pu64Dst = *pu64Src;
7187 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7188 }
7189 return rc;
7190}
7191
7192
7193#ifdef IEM_WITH_SETJMP
7194/**
7195 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7196 *
7197 * @returns The qword.
7198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7199 * @param iSegReg The index of the segment register to use for
7200 * this access. The base and limits are checked.
7201 * @param GCPtrMem The address of the guest memory.
7202 */
7203uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7204{
7205 /* The lazy approach for now... */
7206 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7207 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7208 uint64_t const u64Ret = *pu64Src;
7209 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7210 return u64Ret;
7211}
7212#endif
7213
7214
7215/**
7216 * Fetches a data tword.
7217 *
7218 * @returns Strict VBox status code.
7219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7220 * @param pr80Dst Where to return the tword.
7221 * @param iSegReg The index of the segment register to use for
7222 * this access. The base and limits are checked.
7223 * @param GCPtrMem The address of the guest memory.
7224 */
7225VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7226{
7227 /* The lazy approach for now... */
7228 PCRTFLOAT80U pr80Src;
7229 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7230 if (rc == VINF_SUCCESS)
7231 {
7232 *pr80Dst = *pr80Src;
7233 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7234 }
7235 return rc;
7236}
7237
7238
7239#ifdef IEM_WITH_SETJMP
7240/**
7241 * Fetches a data tword, longjmp on error.
7242 *
7243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7244 * @param pr80Dst Where to return the tword.
7245 * @param iSegReg The index of the segment register to use for
7246 * this access. The base and limits are checked.
7247 * @param GCPtrMem The address of the guest memory.
7248 */
7249void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7250{
7251 /* The lazy approach for now... */
7252 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7253 *pr80Dst = *pr80Src;
7254 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7255}
7256#endif
7257
7258
7259/**
7260 * Fetches a data decimal tword.
7261 *
7262 * @returns Strict VBox status code.
7263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7264 * @param pd80Dst Where to return the tword.
7265 * @param iSegReg The index of the segment register to use for
7266 * this access. The base and limits are checked.
7267 * @param GCPtrMem The address of the guest memory.
7268 */
7269VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7270{
7271 /* The lazy approach for now... */
7272 PCRTPBCD80U pd80Src;
7273 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7274 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7275 if (rc == VINF_SUCCESS)
7276 {
7277 *pd80Dst = *pd80Src;
7278 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7279 }
7280 return rc;
7281}
7282
7283
7284#ifdef IEM_WITH_SETJMP
7285/**
7286 * Fetches a data decimal tword, longjmp on error.
7287 *
7288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7289 * @param pd80Dst Where to return the tword.
7290 * @param iSegReg The index of the segment register to use for
7291 * this access. The base and limits are checked.
7292 * @param GCPtrMem The address of the guest memory.
7293 */
7294void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7295{
7296 /* The lazy approach for now... */
7297 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7298 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7299 *pd80Dst = *pd80Src;
7300 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7301}
7302#endif
7303
7304
7305/**
7306 * Fetches a data dqword (double qword), generally SSE related.
7307 *
7308 * @returns Strict VBox status code.
7309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7310 * @param pu128Dst Where to return the dqword.
7311 * @param iSegReg The index of the segment register to use for
7312 * this access. The base and limits are checked.
7313 * @param GCPtrMem The address of the guest memory.
7314 */
7315VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7316{
7317 /* The lazy approach for now... */
7318 PCRTUINT128U pu128Src;
7319 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7320 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7321 if (rc == VINF_SUCCESS)
7322 {
7323 pu128Dst->au64[0] = pu128Src->au64[0];
7324 pu128Dst->au64[1] = pu128Src->au64[1];
7325 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7326 }
7327 return rc;
7328}
7329
7330
7331#ifdef IEM_WITH_SETJMP
7332/**
7333 * Fetches a data dqword (double qword), generally SSE related.
7334 *
7335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7336 * @param pu128Dst Where to return the dqword.
7337 * @param iSegReg The index of the segment register to use for
7338 * this access. The base and limits are checked.
7339 * @param GCPtrMem The address of the guest memory.
7340 */
7341void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7342{
7343 /* The lazy approach for now... */
7344 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7345 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7346 pu128Dst->au64[0] = pu128Src->au64[0];
7347 pu128Dst->au64[1] = pu128Src->au64[1];
7348 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7349}
7350#endif
7351
7352
7353/**
7354 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7355 * related.
7356 *
7357 * Raises \#GP(0) if not aligned.
7358 *
7359 * @returns Strict VBox status code.
7360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7361 * @param pu128Dst Where to return the dqword.
7362 * @param iSegReg The index of the segment register to use for
7363 * this access. The base and limits are checked.
7364 * @param GCPtrMem The address of the guest memory.
7365 */
7366VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7367{
7368 /* The lazy approach for now... */
7369 PCRTUINT128U pu128Src;
7370 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7371 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7372 if (rc == VINF_SUCCESS)
7373 {
7374 pu128Dst->au64[0] = pu128Src->au64[0];
7375 pu128Dst->au64[1] = pu128Src->au64[1];
7376 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7377 }
7378 return rc;
7379}
7380
7381
7382#ifdef IEM_WITH_SETJMP
7383/**
7384 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7385 * related, longjmp on error.
7386 *
7387 * Raises \#GP(0) if not aligned.
7388 *
7389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7390 * @param pu128Dst Where to return the dqword.
7391 * @param iSegReg The index of the segment register to use for
7392 * this access. The base and limits are checked.
7393 * @param GCPtrMem The address of the guest memory.
7394 */
7395void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7396 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7397{
7398 /* The lazy approach for now... */
7399 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7400 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7401 pu128Dst->au64[0] = pu128Src->au64[0];
7402 pu128Dst->au64[1] = pu128Src->au64[1];
7403 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7404}
7405#endif
7406
7407
7408/**
7409 * Fetches a data oword (octo word), generally AVX related.
7410 *
7411 * @returns Strict VBox status code.
7412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7413 * @param pu256Dst Where to return the oword.
7414 * @param iSegReg The index of the segment register to use for
7415 * this access. The base and limits are checked.
7416 * @param GCPtrMem The address of the guest memory.
7417 */
7418VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7419{
7420 /* The lazy approach for now... */
7421 PCRTUINT256U pu256Src;
7422 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7423 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7424 if (rc == VINF_SUCCESS)
7425 {
7426 pu256Dst->au64[0] = pu256Src->au64[0];
7427 pu256Dst->au64[1] = pu256Src->au64[1];
7428 pu256Dst->au64[2] = pu256Src->au64[2];
7429 pu256Dst->au64[3] = pu256Src->au64[3];
7430 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7431 }
7432 return rc;
7433}
7434
7435
7436#ifdef IEM_WITH_SETJMP
7437/**
7438 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7439 *
7440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7441 * @param pu256Dst Where to return the oword.
7442 * @param iSegReg The index of the segment register to use for
7443 * this access. The base and limits are checked.
7444 * @param GCPtrMem The address of the guest memory.
7445 */
7446void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7447{
7448 /* The lazy approach for now... */
7449 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7450 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7451 pu256Dst->au64[0] = pu256Src->au64[0];
7452 pu256Dst->au64[1] = pu256Src->au64[1];
7453 pu256Dst->au64[2] = pu256Src->au64[2];
7454 pu256Dst->au64[3] = pu256Src->au64[3];
7455 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7456}
7457#endif
7458
7459
7460/**
7461 * Fetches a data oword (octo word) at an aligned address, generally AVX
7462 * related.
7463 *
7464 * Raises \#GP(0) if not aligned.
7465 *
7466 * @returns Strict VBox status code.
7467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7468 * @param pu256Dst Where to return the oword.
7469 * @param iSegReg The index of the segment register to use for
7470 * this access. The base and limits are checked.
7471 * @param GCPtrMem The address of the guest memory.
7472 */
7473VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7474{
7475 /* The lazy approach for now... */
7476 PCRTUINT256U pu256Src;
7477 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7478 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7479 if (rc == VINF_SUCCESS)
7480 {
7481 pu256Dst->au64[0] = pu256Src->au64[0];
7482 pu256Dst->au64[1] = pu256Src->au64[1];
7483 pu256Dst->au64[2] = pu256Src->au64[2];
7484 pu256Dst->au64[3] = pu256Src->au64[3];
7485 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7486 }
7487 return rc;
7488}
7489
7490
7491#ifdef IEM_WITH_SETJMP
7492/**
7493 * Fetches a data oword (octo word) at an aligned address, generally AVX
7494 * related, longjmp on error.
7495 *
7496 * Raises \#GP(0) if not aligned.
7497 *
7498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7499 * @param pu256Dst Where to return the oword.
7500 * @param iSegReg The index of the segment register to use for
7501 * this access. The base and limits are checked.
7502 * @param GCPtrMem The address of the guest memory.
7503 */
7504void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7505 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7506{
7507 /* The lazy approach for now... */
7508 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7509 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7510 pu256Dst->au64[0] = pu256Src->au64[0];
7511 pu256Dst->au64[1] = pu256Src->au64[1];
7512 pu256Dst->au64[2] = pu256Src->au64[2];
7513 pu256Dst->au64[3] = pu256Src->au64[3];
7514 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7515}
7516#endif
7517
7518
7519
7520/**
7521 * Fetches a descriptor register (lgdt, lidt).
7522 *
7523 * @returns Strict VBox status code.
7524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7525 * @param pcbLimit Where to return the limit.
7526 * @param pGCPtrBase Where to return the base.
7527 * @param iSegReg The index of the segment register to use for
7528 * this access. The base and limits are checked.
7529 * @param GCPtrMem The address of the guest memory.
7530 * @param enmOpSize The effective operand size.
7531 */
7532VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7533 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7534{
7535 /*
7536 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7537 * little special:
7538 * - The two reads are done separately.
7539 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
7540 * - We suspect the 386 to actually commit the limit before the base in
7541 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7542 * don't try to emulate this eccentric behavior, because it's not well
7543 * enough understood and rather hard to trigger.
7544 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7545 */
7546 VBOXSTRICTRC rcStrict;
7547 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7548 {
7549 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7550 if (rcStrict == VINF_SUCCESS)
7551 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7552 }
7553 else
7554 {
7555 uint32_t uTmp = 0; /* (Visual C++ might otherwise warn about this being used uninitialized) */
7556 if (enmOpSize == IEMMODE_32BIT)
7557 {
7558 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7559 {
7560 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7561 if (rcStrict == VINF_SUCCESS)
7562 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7563 }
7564 else
7565 {
7566 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7567 if (rcStrict == VINF_SUCCESS)
7568 {
7569 *pcbLimit = (uint16_t)uTmp;
7570 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7571 }
7572 }
7573 if (rcStrict == VINF_SUCCESS)
7574 *pGCPtrBase = uTmp;
7575 }
7576 else
7577 {
7578 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7579 if (rcStrict == VINF_SUCCESS)
7580 {
7581 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7582 if (rcStrict == VINF_SUCCESS)
7583 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7584 }
7585 }
7586 }
7587 return rcStrict;
7588}
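/*
 * Worked example (editor's addition): assume the six bytes at GCPtrMem are
 * ff 03 78 56 34 12, i.e. a limit word of 0x03ff followed by the dword 0x12345678.
 * Outside 64-bit mode the function above then returns:
 *      enmOpSize == IEMMODE_16BIT:  *pcbLimit = 0x03ff, *pGCPtrBase = 0x00345678 (top byte masked off)
 *      enmOpSize == IEMMODE_32BIT:  *pcbLimit = 0x03ff, *pGCPtrBase = 0x12345678
 * In 64-bit mode the limit word is instead followed by a full 8-byte base and
 * the operand size override is ignored.
 */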
7589
7590
7591
7592/**
7593 * Stores a data byte.
7594 *
7595 * @returns Strict VBox status code.
7596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7597 * @param iSegReg The index of the segment register to use for
7598 * this access. The base and limits are checked.
7599 * @param GCPtrMem The address of the guest memory.
7600 * @param u8Value The value to store.
7601 */
7602VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7603{
7604 /* The lazy approach for now... */
7605 uint8_t *pu8Dst;
7606 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7607 if (rc == VINF_SUCCESS)
7608 {
7609 *pu8Dst = u8Value;
7610 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7611 }
7612 return rc;
7613}
7614
7615
7616#ifdef IEM_WITH_SETJMP
7617/**
7618 * Stores a data byte, longjmp on error.
7619 *
7620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7621 * @param iSegReg The index of the segment register to use for
7622 * this access. The base and limits are checked.
7623 * @param GCPtrMem The address of the guest memory.
7624 * @param u8Value The value to store.
7625 */
7626void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7627{
7628 /* The lazy approach for now... */
7629 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7630 *pu8Dst = u8Value;
7631 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7632}
7633#endif
7634
7635
7636/**
7637 * Stores a data word.
7638 *
7639 * @returns Strict VBox status code.
7640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7641 * @param iSegReg The index of the segment register to use for
7642 * this access. The base and limits are checked.
7643 * @param GCPtrMem The address of the guest memory.
7644 * @param u16Value The value to store.
7645 */
7646VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7647{
7648 /* The lazy approach for now... */
7649 uint16_t *pu16Dst;
7650 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7651 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7652 if (rc == VINF_SUCCESS)
7653 {
7654 *pu16Dst = u16Value;
7655 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7656 }
7657 return rc;
7658}
7659
7660
7661#ifdef IEM_WITH_SETJMP
7662/**
7663 * Stores a data word, longjmp on error.
7664 *
7665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7666 * @param iSegReg The index of the segment register to use for
7667 * this access. The base and limits are checked.
7668 * @param GCPtrMem The address of the guest memory.
7669 * @param u16Value The value to store.
7670 */
7671void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7672{
7673 /* The lazy approach for now... */
7674 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7675 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7676 *pu16Dst = u16Value;
7677 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7678}
7679#endif
7680
7681
7682/**
7683 * Stores a data dword.
7684 *
7685 * @returns Strict VBox status code.
7686 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7687 * @param iSegReg The index of the segment register to use for
7688 * this access. The base and limits are checked.
7689 * @param GCPtrMem The address of the guest memory.
7690 * @param u32Value The value to store.
7691 */
7692VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7693{
7694 /* The lazy approach for now... */
7695 uint32_t *pu32Dst;
7696 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7697 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7698 if (rc == VINF_SUCCESS)
7699 {
7700 *pu32Dst = u32Value;
7701 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7702 }
7703 return rc;
7704}
7705
7706
7707#ifdef IEM_WITH_SETJMP
7708/**
7709 * Stores a data dword, longjmp on error.
7710 *
7712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7713 * @param iSegReg The index of the segment register to use for
7714 * this access. The base and limits are checked.
7715 * @param GCPtrMem The address of the guest memory.
7716 * @param u32Value The value to store.
7717 */
7718void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7719{
7720 /* The lazy approach for now... */
7721 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7722 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7723 *pu32Dst = u32Value;
7724 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7725}
7726#endif
7727
7728
7729/**
7730 * Stores a data qword.
7731 *
7732 * @returns Strict VBox status code.
7733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7734 * @param iSegReg The index of the segment register to use for
7735 * this access. The base and limits are checked.
7736 * @param GCPtrMem The address of the guest memory.
7737 * @param u64Value The value to store.
7738 */
7739VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7740{
7741 /* The lazy approach for now... */
7742 uint64_t *pu64Dst;
7743 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7744 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7745 if (rc == VINF_SUCCESS)
7746 {
7747 *pu64Dst = u64Value;
7748 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7749 }
7750 return rc;
7751}
7752
7753
7754#ifdef IEM_WITH_SETJMP
7755/**
7756 * Stores a data qword, longjmp on error.
7757 *
7758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7759 * @param iSegReg The index of the segment register to use for
7760 * this access. The base and limits are checked.
7761 * @param GCPtrMem The address of the guest memory.
7762 * @param u64Value The value to store.
7763 */
7764void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7765{
7766 /* The lazy approach for now... */
7767 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7768 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7769 *pu64Dst = u64Value;
7770 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7771}
7772#endif
7773
7774
7775/**
7776 * Stores a data dqword.
7777 *
7778 * @returns Strict VBox status code.
7779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7780 * @param iSegReg The index of the segment register to use for
7781 * this access. The base and limits are checked.
7782 * @param GCPtrMem The address of the guest memory.
7783 * @param u128Value The value to store.
7784 */
7785VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7786{
7787 /* The lazy approach for now... */
7788 PRTUINT128U pu128Dst;
7789 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7790 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7791 if (rc == VINF_SUCCESS)
7792 {
7793 pu128Dst->au64[0] = u128Value.au64[0];
7794 pu128Dst->au64[1] = u128Value.au64[1];
7795 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7796 }
7797 return rc;
7798}
7799
7800
7801#ifdef IEM_WITH_SETJMP
7802/**
7803 * Stores a data dqword, longjmp on error.
7804 *
7805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7806 * @param iSegReg The index of the segment register to use for
7807 * this access. The base and limits are checked.
7808 * @param GCPtrMem The address of the guest memory.
7809 * @param u128Value The value to store.
7810 */
7811void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7812{
7813 /* The lazy approach for now... */
7814 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7815 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7816 pu128Dst->au64[0] = u128Value.au64[0];
7817 pu128Dst->au64[1] = u128Value.au64[1];
7818 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7819}
7820#endif
7821
7822
7823/**
7824 * Stores a data dqword, SSE aligned.
7825 *
7826 * @returns Strict VBox status code.
7827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7828 * @param iSegReg The index of the segment register to use for
7829 * this access. The base and limits are checked.
7830 * @param GCPtrMem The address of the guest memory.
7831 * @param u128Value The value to store.
7832 */
7833VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7834{
7835 /* The lazy approach for now... */
7836 PRTUINT128U pu128Dst;
7837 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7838 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7839 if (rc == VINF_SUCCESS)
7840 {
7841 pu128Dst->au64[0] = u128Value.au64[0];
7842 pu128Dst->au64[1] = u128Value.au64[1];
7843 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7844 }
7845 return rc;
7846}
7847
7848
7849#ifdef IEM_WITH_SETJMP
7850/**
7851 * Stores a data dqword, SSE aligned, longjmp on error.
7852 *
7854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7855 * @param iSegReg The index of the segment register to use for
7856 * this access. The base and limits are checked.
7857 * @param GCPtrMem The address of the guest memory.
7858 * @param u128Value The value to store.
7859 */
7860void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7861 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7862{
7863 /* The lazy approach for now... */
7864 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7865 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7866 pu128Dst->au64[0] = u128Value.au64[0];
7867 pu128Dst->au64[1] = u128Value.au64[1];
7868 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7869}
7870#endif
7871
7872
7873/**
7874 * Stores a data oword (octo word).
7875 *
7876 * @returns Strict VBox status code.
7877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7878 * @param iSegReg The index of the segment register to use for
7879 * this access. The base and limits are checked.
7880 * @param GCPtrMem The address of the guest memory.
7881 * @param pu256Value Pointer to the value to store.
7882 */
7883VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7884{
7885 /* The lazy approach for now... */
7886 PRTUINT256U pu256Dst;
7887 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7888 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7889 if (rc == VINF_SUCCESS)
7890 {
7891 pu256Dst->au64[0] = pu256Value->au64[0];
7892 pu256Dst->au64[1] = pu256Value->au64[1];
7893 pu256Dst->au64[2] = pu256Value->au64[2];
7894 pu256Dst->au64[3] = pu256Value->au64[3];
7895 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7896 }
7897 return rc;
7898}
7899
7900
7901#ifdef IEM_WITH_SETJMP
7902/**
7903 * Stores a data oword (octo word), longjmp on error.
7904 *
7905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7906 * @param iSegReg The index of the segment register to use for
7907 * this access. The base and limits are checked.
7908 * @param GCPtrMem The address of the guest memory.
7909 * @param pu256Value Pointer to the value to store.
7910 */
7911void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7912{
7913 /* The lazy approach for now... */
7914 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7915 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7916 pu256Dst->au64[0] = pu256Value->au64[0];
7917 pu256Dst->au64[1] = pu256Value->au64[1];
7918 pu256Dst->au64[2] = pu256Value->au64[2];
7919 pu256Dst->au64[3] = pu256Value->au64[3];
7920 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7921}
7922#endif
7923
7924
7925/**
7926 * Stores a data oword (octo word), AVX \#GP(0) aligned.
7927 *
7928 * @returns Strict VBox status code.
7929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7930 * @param iSegReg The index of the segment register to use for
7931 * this access. The base and limits are checked.
7932 * @param GCPtrMem The address of the guest memory.
7933 * @param pu256Value Pointer to the value to store.
7934 */
7935VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7936{
7937 /* The lazy approach for now... */
7938 PRTUINT256U pu256Dst;
7939 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7940 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7941 if (rc == VINF_SUCCESS)
7942 {
7943 pu256Dst->au64[0] = pu256Value->au64[0];
7944 pu256Dst->au64[1] = pu256Value->au64[1];
7945 pu256Dst->au64[2] = pu256Value->au64[2];
7946 pu256Dst->au64[3] = pu256Value->au64[3];
7947 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7948 }
7949 return rc;
7950}
7951
7952
7953#ifdef IEM_WITH_SETJMP
7954/**
7955 * Stores a data oword (octo word), AVX aligned, longjmp on error.
7956 *
7958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7959 * @param iSegReg The index of the segment register to use for
7960 * this access. The base and limits are checked.
7961 * @param GCPtrMem The address of the guest memory.
7962 * @param pu256Value Pointer to the value to store.
7963 */
7964void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7965 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7966{
7967 /* The lazy approach for now... */
7968 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7969 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7970 pu256Dst->au64[0] = pu256Value->au64[0];
7971 pu256Dst->au64[1] = pu256Value->au64[1];
7972 pu256Dst->au64[2] = pu256Value->au64[2];
7973 pu256Dst->au64[3] = pu256Value->au64[3];
7974 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7975}
7976#endif
7977
7978
7979/**
7980 * Stores a descriptor register (sgdt, sidt).
7981 *
7982 * @returns Strict VBox status code.
7983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7984 * @param cbLimit The limit.
7985 * @param GCPtrBase The base address.
7986 * @param iSegReg The index of the segment register to use for
7987 * this access. The base and limits are checked.
7988 * @param GCPtrMem The address of the guest memory.
7989 */
7990VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7991{
7992 /*
7993 * The SIDT and SGDT instructions actually store the data using two
7994 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7995 * do not respond to opsize prefixes.
7996 */
7997 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7998 if (rcStrict == VINF_SUCCESS)
7999 {
8000 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
8001 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8002 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8003 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8004 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
8005 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8006 else
8007 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8008 }
8009 return rcStrict;
8010}
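/*
 * Worked example (editor's addition): storing cbLimit=0x03ff and GCPtrBase=0x00345678
 * from 16-bit code writes the limit word first and then a base dword of:
 *      286-level target CPUs:  78 56 34 ff   (top byte forced to 0xff)
 *      386 and later targets:  78 56 34 00
 * 32-bit code stores the full dword base and 64-bit code stores a qword base.
 */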
8011
8012
8013/**
8014 * Pushes a word onto the stack.
8015 *
8016 * @returns Strict VBox status code.
8017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8018 * @param u16Value The value to push.
8019 */
8020VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
8021{
8022 /* Decrement the stack pointer. */
8023 uint64_t uNewRsp;
8024 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
8025
8026 /* Write the word the lazy way. */
8027 uint16_t *pu16Dst;
8028 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8029 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8030 if (rc == VINF_SUCCESS)
8031 {
8032 *pu16Dst = u16Value;
8033 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8034 }
8035
8036 /* Commit the new RSP value unless an access handler made trouble. */
8037 if (rc == VINF_SUCCESS)
8038 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8039
8040 return rc;
8041}
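/*
 * Illustrative caller sketch (editor's addition, u16Imm is a hypothetical local):
 * the helper above commits RSP itself on success, so a caller only has to
 * propagate the status code.
 *
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16Imm);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    // RSP is left untouched if the write failed
 */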
8042
8043
8044/**
8045 * Pushes a dword onto the stack.
8046 *
8047 * @returns Strict VBox status code.
8048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8049 * @param u32Value The value to push.
8050 */
8051VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8052{
8053 /* Decrement the stack pointer. */
8054 uint64_t uNewRsp;
8055 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8056
8057 /* Write the dword the lazy way. */
8058 uint32_t *pu32Dst;
8059 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8060 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8061 if (rc == VINF_SUCCESS)
8062 {
8063 *pu32Dst = u32Value;
8064 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8065 }
8066
8067 /* Commit the new RSP value unless an access handler made trouble. */
8068 if (rc == VINF_SUCCESS)
8069 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8070
8071 return rc;
8072}
8073
8074
8075/**
8076 * Pushes a dword segment register value onto the stack.
8077 *
8078 * @returns Strict VBox status code.
8079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8080 * @param u32Value The value to push.
8081 */
8082VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8083{
8084 /* Decrement the stack pointer. */
8085 uint64_t uNewRsp;
8086 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8087
8088 /* The Intel docs talk about zero extending the selector register
8089 value. My actual Intel CPU here might be zero extending the value,
8090 but it still only writes the lower word... */
8091 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8092 * happens when crossing an electric page boundary, is the high word checked
8093 * for write accessibility or not? Probably it is. What about segment limits?
8094 * It appears this behavior is also shared with trap error codes.
8095 *
8096 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
8097 * ancient hardware when it actually did change. */
8098 uint16_t *pu16Dst;
8099 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8100 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8101 if (rc == VINF_SUCCESS)
8102 {
8103 *pu16Dst = (uint16_t)u32Value;
8104 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8105 }
8106
8107 /* Commit the new RSP value unless an access handler made trouble. */
8108 if (rc == VINF_SUCCESS)
8109 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8110
8111 return rc;
8112}
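/*
 * Illustrative note (editor's addition): with a 32-bit operand size the helper
 * above moves the stack pointer down by 4 but only writes the low word of the
 * slot, so pushing e.g. a selector value of 0x0023 on top of stale memory
 * containing 0xdeadbeef leaves the dword at the new stack top as 0xdead0023.
 */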
8113
8114
8115/**
8116 * Pushes a qword onto the stack.
8117 *
8118 * @returns Strict VBox status code.
8119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8120 * @param u64Value The value to push.
8121 */
8122VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8123{
8124 /* Decrement the stack pointer. */
8125 uint64_t uNewRsp;
8126 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8127
8128 /* Write the qword the lazy way. */
8129 uint64_t *pu64Dst;
8130 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8131 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8132 if (rc == VINF_SUCCESS)
8133 {
8134 *pu64Dst = u64Value;
8135 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8136 }
8137
8138 /* Commit the new RSP value unless an access handler made trouble. */
8139 if (rc == VINF_SUCCESS)
8140 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8141
8142 return rc;
8143}
8144
8145
8146/**
8147 * Pops a word from the stack.
8148 *
8149 * @returns Strict VBox status code.
8150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8151 * @param pu16Value Where to store the popped value.
8152 */
8153VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8154{
8155 /* Increment the stack pointer. */
8156 uint64_t uNewRsp;
8157 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8158
8159 /* Read the word the lazy way. */
8160 uint16_t const *pu16Src;
8161 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8162 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8163 if (rc == VINF_SUCCESS)
8164 {
8165 *pu16Value = *pu16Src;
8166 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8167
8168 /* Commit the new RSP value. */
8169 if (rc == VINF_SUCCESS)
8170 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8171 }
8172
8173 return rc;
8174}
8175
8176
8177/**
8178 * Pops a dword from the stack.
8179 *
8180 * @returns Strict VBox status code.
8181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8182 * @param pu32Value Where to store the popped value.
8183 */
8184VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8185{
8186 /* Increment the stack pointer. */
8187 uint64_t uNewRsp;
8188 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8189
8190 /* Read the dword the lazy way. */
8191 uint32_t const *pu32Src;
8192 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8193 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8194 if (rc == VINF_SUCCESS)
8195 {
8196 *pu32Value = *pu32Src;
8197 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8198
8199 /* Commit the new RSP value. */
8200 if (rc == VINF_SUCCESS)
8201 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8202 }
8203
8204 return rc;
8205}
8206
8207
8208/**
8209 * Pops a qword from the stack.
8210 *
8211 * @returns Strict VBox status code.
8212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8213 * @param pu64Value Where to store the popped value.
8214 */
8215VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8216{
8217 /* Increment the stack pointer. */
8218 uint64_t uNewRsp;
8219 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8220
8221 /* Read the qword the lazy way. */
8222 uint64_t const *pu64Src;
8223 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8224 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8225 if (rc == VINF_SUCCESS)
8226 {
8227 *pu64Value = *pu64Src;
8228 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8229
8230 /* Commit the new RSP value. */
8231 if (rc == VINF_SUCCESS)
8232 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8233 }
8234
8235 return rc;
8236}
8237
8238
8239/**
8240 * Pushes a word onto the stack, using a temporary stack pointer.
8241 *
8242 * @returns Strict VBox status code.
8243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8244 * @param u16Value The value to push.
8245 * @param pTmpRsp Pointer to the temporary stack pointer.
8246 */
8247VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8248{
8249 /* Decrement the stack pointer. */
8250 RTUINT64U NewRsp = *pTmpRsp;
8251 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8252
8253 /* Write the word the lazy way. */
8254 uint16_t *pu16Dst;
8255 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8256 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8257 if (rc == VINF_SUCCESS)
8258 {
8259 *pu16Dst = u16Value;
8260 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8261 }
8262
8263 /* Commit the new RSP value unless an access handler made trouble. */
8264 if (rc == VINF_SUCCESS)
8265 *pTmpRsp = NewRsp;
8266
8267 return rc;
8268}
8269
8270
8271/**
8272 * Pushes a dword onto the stack, using a temporary stack pointer.
8273 *
8274 * @returns Strict VBox status code.
8275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8276 * @param u32Value The value to push.
8277 * @param pTmpRsp Pointer to the temporary stack pointer.
8278 */
8279VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8280{
8281 /* Decrement the stack pointer. */
8282 RTUINT64U NewRsp = *pTmpRsp;
8283 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8284
8285 /* Write the dword the lazy way. */
8286 uint32_t *pu32Dst;
8287 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8288 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8289 if (rc == VINF_SUCCESS)
8290 {
8291 *pu32Dst = u32Value;
8292 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8293 }
8294
8295 /* Commit the new RSP value unless an access handler made trouble. */
8296 if (rc == VINF_SUCCESS)
8297 *pTmpRsp = NewRsp;
8298
8299 return rc;
8300}
8301
8302
8303/**
8304 * Pushes a qword onto the stack, using a temporary stack pointer.
8305 *
8306 * @returns Strict VBox status code.
8307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8308 * @param u64Value The value to push.
8309 * @param pTmpRsp Pointer to the temporary stack pointer.
8310 */
8311VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8312{
8313 /* Decrement the stack pointer. */
8314 RTUINT64U NewRsp = *pTmpRsp;
8315 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8316
8317 /* Write the qword the lazy way. */
8318 uint64_t *pu64Dst;
8319 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8320 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8321 if (rc == VINF_SUCCESS)
8322 {
8323 *pu64Dst = u64Value;
8324 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8325 }
8326
8327 /* Commit the new RSP value unless an access handler made trouble. */
8328 if (rc == VINF_SUCCESS)
8329 *pTmpRsp = NewRsp;
8330
8331 return rc;
8332}
8333
8334
8335/**
8336 * Pops a word from the stack, using a temporary stack pointer.
8337 *
8338 * @returns Strict VBox status code.
8339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8340 * @param pu16Value Where to store the popped value.
8341 * @param pTmpRsp Pointer to the temporary stack pointer.
8342 */
8343VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8344{
8345 /* Increment the stack pointer. */
8346 RTUINT64U NewRsp = *pTmpRsp;
8347 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8348
8349 /* Read the word the lazy way. */
8350 uint16_t const *pu16Src;
8351 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8352 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8353 if (rc == VINF_SUCCESS)
8354 {
8355 *pu16Value = *pu16Src;
8356 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8357
8358 /* Commit the new RSP value. */
8359 if (rc == VINF_SUCCESS)
8360 *pTmpRsp = NewRsp;
8361 }
8362
8363 return rc;
8364}
8365
8366
8367/**
8368 * Pops a dword from the stack, using a temporary stack pointer.
8369 *
8370 * @returns Strict VBox status code.
8371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8372 * @param pu32Value Where to store the popped value.
8373 * @param pTmpRsp Pointer to the temporary stack pointer.
8374 */
8375VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8376{
8377 /* Increment the stack pointer. */
8378 RTUINT64U NewRsp = *pTmpRsp;
8379 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8380
8381 /* Read the dword the lazy way. */
8382 uint32_t const *pu32Src;
8383 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8384 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8385 if (rc == VINF_SUCCESS)
8386 {
8387 *pu32Value = *pu32Src;
8388 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8389
8390 /* Commit the new RSP value. */
8391 if (rc == VINF_SUCCESS)
8392 *pTmpRsp = NewRsp;
8393 }
8394
8395 return rc;
8396}
8397
8398
8399/**
8400 * Pops a qword from the stack, using a temporary stack pointer.
8401 *
8402 * @returns Strict VBox status code.
8403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8404 * @param pu64Value Where to store the popped value.
8405 * @param pTmpRsp Pointer to the temporary stack pointer.
8406 */
8407VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8408{
8409 /* Increment the stack pointer. */
8410 RTUINT64U NewRsp = *pTmpRsp;
8411 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8412
8413 /* Read the qword the lazy way. */
8414 uint64_t const *pu64Src;
8415 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8416 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8417 if (rcStrict == VINF_SUCCESS)
8418 {
8419 *pu64Value = *pu64Src;
8420 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8421
8422 /* Commit the new RSP value. */
8423 if (rcStrict == VINF_SUCCESS)
8424 *pTmpRsp = NewRsp;
8425 }
8426
8427 return rcStrict;
8428}
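/*
 * Illustrative sketch (editor's addition, uValue1/uValue2 are hypothetical
 * locals): the *Ex variants let a caller work against a scratch RSP and only
 * commit it once every access has succeeded.
 *
 *      RTUINT64U TmpRsp;
 *      TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, uValue1, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPushU16Ex(pVCpu, uValue2, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = TmpRsp.u;  // commit only when everything worked
 *      return rcStrict;
 */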
8429
8430
8431/**
8432 * Begin a special stack push (used by interrupts, exceptions and such).
8433 *
8434 * This will raise \#SS or \#PF if appropriate.
8435 *
8436 * @returns Strict VBox status code.
8437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8438 * @param cbMem The number of bytes to push onto the stack.
8439 * @param cbAlign The alignment mask (7, 3, 1).
8440 * @param ppvMem Where to return the pointer to the stack memory.
8441 * As with the other memory functions this could be
8442 * direct access or bounce buffered access, so
8443 * don't commit any registers until the commit call
8444 * succeeds.
8445 * @param puNewRsp Where to return the new RSP value. This must be
8446 * passed unchanged to
8447 * iemMemStackPushCommitSpecial().
8448 */
8449VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8450 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8451{
8452 Assert(cbMem < UINT8_MAX);
8453 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8454 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8455 IEM_ACCESS_STACK_W, cbAlign);
8456}
8457
8458
8459/**
8460 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8461 *
8462 * This will update the rSP.
8463 *
8464 * @returns Strict VBox status code.
8465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8466 * @param pvMem The pointer returned by
8467 * iemMemStackPushBeginSpecial().
8468 * @param uNewRsp The new RSP value returned by
8469 * iemMemStackPushBeginSpecial().
8470 */
8471VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8472{
8473 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8474 if (rcStrict == VINF_SUCCESS)
8475 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8476 return rcStrict;
8477}
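/*
 * Illustrative sketch (editor's addition, hypothetical 6-byte frame): the
 * begin/commit pair above is used when several fields must land on the stack
 * as one operation; RSP is only committed by iemMemStackPushCommitSpecial()
 * once everything has succeeded.
 *
 *      void    *pvFrame;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 1, &pvFrame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... fill the six bytes at pvFrame (e.g. IP, CS and FLAGS words) ...
 *      return iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp);
 */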
8478
8479
8480/**
8481 * Begin a special stack pop (used by iret, retf and such).
8482 *
8483 * This will raise \#SS or \#PF if appropriate.
8484 *
8485 * @returns Strict VBox status code.
8486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8487 * @param cbMem The number of bytes to pop from the stack.
8488 * @param cbAlign The alignment mask (7, 3, 1).
8489 * @param ppvMem Where to return the pointer to the stack memory.
8490 * @param puNewRsp Where to return the new RSP value. This must be
8491 * assigned to CPUMCTX::rsp manually some time
8492 * after iemMemStackPopDoneSpecial() has been
8493 * called.
8494 */
8495VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8496 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8497{
8498 Assert(cbMem < UINT8_MAX);
8499 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8500 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8501}
8502
8503
8504/**
8505 * Continue a special stack pop (used by iret and retf), for the purpose of
8506 * retrieving a new stack pointer.
8507 *
8508 * This will raise \#SS or \#PF if appropriate.
8509 *
8510 * @returns Strict VBox status code.
8511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8512 * @param off Offset from the top of the stack. This is zero
8513 * except in the retf case.
8514 * @param cbMem The number of bytes to pop from the stack.
8515 * @param ppvMem Where to return the pointer to the stack memory.
8516 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8517 * return this because all use of this function is
8518 * to retrieve a new value and anything we return
8519 * here would be discarded.)
8520 */
8521VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8522 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8523{
8524 Assert(cbMem < UINT8_MAX);
8525
8526 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8527 RTGCPTR GCPtrTop;
8528 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8529 GCPtrTop = uCurNewRsp;
8530 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8531 GCPtrTop = (uint32_t)uCurNewRsp;
8532 else
8533 GCPtrTop = (uint16_t)uCurNewRsp;
8534
8535 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8536 0 /* checked in iemMemStackPopBeginSpecial */);
8537}
8538
8539
8540/**
8541 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8542 * iemMemStackPopContinueSpecial).
8543 *
8544 * The caller will manually commit the rSP.
8545 *
8546 * @returns Strict VBox status code.
8547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8548 * @param pvMem The pointer returned by
8549 * iemMemStackPopBeginSpecial() or
8550 * iemMemStackPopContinueSpecial().
8551 */
8552VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8553{
8554 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8555}
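/*
 * Illustrative sketch (editor's addition, hypothetical 6-byte far-return frame):
 * the special pop protocol reads the frame first and leaves it to the caller to
 * commit RSP manually once it is sure the instruction will complete.
 *
 *      void const *pvFrame;
 *      uint64_t    uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &pvFrame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... copy the return IP and CS out of pvFrame, do the checks ...
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp;   // commit RSP by hand, as documented above
 *      return rcStrict;
 */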
8556
8557
8558/**
8559 * Fetches a system table byte.
8560 *
8561 * @returns Strict VBox status code.
8562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8563 * @param pbDst Where to return the byte.
8564 * @param iSegReg The index of the segment register to use for
8565 * this access. The base and limits are checked.
8566 * @param GCPtrMem The address of the guest memory.
8567 */
8568VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8569{
8570 /* The lazy approach for now... */
8571 uint8_t const *pbSrc;
8572 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8573 if (rc == VINF_SUCCESS)
8574 {
8575 *pbDst = *pbSrc;
8576 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8577 }
8578 return rc;
8579}
8580
8581
8582/**
8583 * Fetches a system table word.
8584 *
8585 * @returns Strict VBox status code.
8586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8587 * @param pu16Dst Where to return the word.
8588 * @param iSegReg The index of the segment register to use for
8589 * this access. The base and limits are checked.
8590 * @param GCPtrMem The address of the guest memory.
8591 */
8592VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8593{
8594 /* The lazy approach for now... */
8595 uint16_t const *pu16Src;
8596 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8597 if (rc == VINF_SUCCESS)
8598 {
8599 *pu16Dst = *pu16Src;
8600 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8601 }
8602 return rc;
8603}
8604
8605
8606/**
8607 * Fetches a system table dword.
8608 *
8609 * @returns Strict VBox status code.
8610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8611 * @param pu32Dst Where to return the dword.
8612 * @param iSegReg The index of the segment register to use for
8613 * this access. The base and limits are checked.
8614 * @param GCPtrMem The address of the guest memory.
8615 */
8616VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8617{
8618 /* The lazy approach for now... */
8619 uint32_t const *pu32Src;
8620 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8621 if (rc == VINF_SUCCESS)
8622 {
8623 *pu32Dst = *pu32Src;
8624 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8625 }
8626 return rc;
8627}
8628
8629
8630/**
8631 * Fetches a system table qword.
8632 *
8633 * @returns Strict VBox status code.
8634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8635 * @param pu64Dst Where to return the qword.
8636 * @param iSegReg The index of the segment register to use for
8637 * this access. The base and limits are checked.
8638 * @param GCPtrMem The address of the guest memory.
8639 */
8640VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8641{
8642 /* The lazy approach for now... */
8643 uint64_t const *pu64Src;
8644 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8645 if (rc == VINF_SUCCESS)
8646 {
8647 *pu64Dst = *pu64Src;
8648 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8649 }
8650 return rc;
8651}
8652
8653
8654/**
8655 * Fetches a descriptor table entry with caller specified error code.
8656 *
8657 * @returns Strict VBox status code.
8658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8659 * @param pDesc Where to return the descriptor table entry.
8660 * @param uSel The selector which table entry to fetch.
8661 * @param uXcpt The exception to raise on table lookup error.
8662 * @param uErrorCode The error code associated with the exception.
8663 */
8664static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8665 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8666{
8667 AssertPtr(pDesc);
8668 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8669
8670 /** @todo did the 286 require all 8 bytes to be accessible? */
8671 /*
8672 * Get the selector table base and check bounds.
8673 */
8674 RTGCPTR GCPtrBase;
8675 if (uSel & X86_SEL_LDT)
8676 {
8677 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8678 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8679 {
8680 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8681 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8682 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8683 uErrorCode, 0);
8684 }
8685
8686 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8687 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8688 }
8689 else
8690 {
8691 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8692 {
8693 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8694 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8695 uErrorCode, 0);
8696 }
8697 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8698 }
8699
8700 /*
8701 * Read the legacy descriptor and maybe the long mode extensions if
8702 * required.
8703 */
8704 VBOXSTRICTRC rcStrict;
8705 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8706 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8707 else
8708 {
8709 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8710 if (rcStrict == VINF_SUCCESS)
8711 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8712 if (rcStrict == VINF_SUCCESS)
8713 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8714 if (rcStrict == VINF_SUCCESS)
8715 pDesc->Legacy.au16[3] = 0;
8716 else
8717 return rcStrict;
8718 }
8719
8720 if (rcStrict == VINF_SUCCESS)
8721 {
8722 if ( !IEM_IS_LONG_MODE(pVCpu)
8723 || pDesc->Legacy.Gen.u1DescType)
8724 pDesc->Long.au64[1] = 0;
8725 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8726 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8727 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8728 else
8729 {
8730 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8731 /** @todo is this the right exception? */
8732 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8733 }
8734 }
8735 return rcStrict;
8736}
8737
8738
8739/**
8740 * Fetches a descriptor table entry.
8741 *
8742 * @returns Strict VBox status code.
8743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8744 * @param pDesc Where to return the descriptor table entry.
8745 * @param uSel The selector which table entry to fetch.
8746 * @param uXcpt The exception to raise on table lookup error.
8747 */
8748VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8749{
8750 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8751}
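/*
 * Worked example (editor's addition): for uSel = 0x002b the low bits decode as
 * RPL = 3 and TI = 1 (LDT), and the byte offset into the table is
 * uSel & X86_SEL_MASK = 0x0028. The bounds check above therefore requires
 * (uSel | X86_SEL_RPL_LDT) = 0x002f to be within the LDT limit, i.e. the limit
 * has to cover all eight bytes of the descriptor.
 */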
8752
8753
8754/**
8755 * Marks the selector descriptor as accessed (only non-system descriptors).
8756 *
8757 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8758 * will therefore skip the limit checks.
8759 *
8760 * @returns Strict VBox status code.
8761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8762 * @param uSel The selector.
8763 */
8764VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8765{
8766 /*
8767 * Get the selector table base and calculate the entry address.
8768 */
8769 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8770 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8771 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8772 GCPtr += uSel & X86_SEL_MASK;
8773
8774 /*
8775 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8776 * ugly stuff to avoid this. This will make sure it's an atomic access
8777 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8778 */
8779 VBOXSTRICTRC rcStrict;
8780 uint32_t volatile *pu32;
8781 if ((GCPtr & 3) == 0)
8782 {
8783 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8784 GCPtr += 2 + 2;
8785 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8786 if (rcStrict != VINF_SUCCESS)
8787 return rcStrict;
8788 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8789 }
8790 else
8791 {
8792 /* The misaligned GDT/LDT case, map the whole thing. */
8793 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8794 if (rcStrict != VINF_SUCCESS)
8795 return rcStrict;
8796 switch ((uintptr_t)pu32 & 3)
8797 {
8798 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8799 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8800 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8801 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8802 }
8803 }
8804
8805 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8806}
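/*
 * Worked example (editor's addition): the accessed flag is bit 40 of the 8-byte
 * descriptor. In the misaligned case above, if (uintptr_t)pu32 & 3 == 2 the
 * code advances the byte pointer by 2 (16 bits) to reach a dword-aligned
 * address and sets bit 40 - 16 = 24 there - the same descriptor bit, but via
 * an aligned atomic access.
 */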
8807
8808/** @} */
8809
8810/** @name Opcode Helpers.
8811 * @{
8812 */
8813
8814/**
8815 * Calculates the effective address of a ModR/M memory operand.
8816 *
8817 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8818 *
8819 * @return Strict VBox status code.
8820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8821 * @param bRm The ModRM byte.
8822 * @param cbImmAndRspOffset - First byte: The size of any immediate
8823 * following the effective address opcode bytes
8824 * (only for RIP relative addressing).
8825 * - Second byte: RSP displacement (for POP [ESP]).
8826 * @param pGCPtrEff Where to return the effective address.
8827 */
8828VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8829{
8830 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8831# define SET_SS_DEF() \
8832 do \
8833 { \
8834 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8835 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8836 } while (0)
8837
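    /*
     * Worked example (editor's addition): in 16-bit addressing a ModRM byte of
     * 0x46 decodes as mod=1, rm=6, i.e. [BP + disp8]; the code below fetches the
     * signed 8-bit displacement, adds BP, and makes SS the default segment via
     * SET_SS_DEF() because BP is involved.
     */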
8838 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8839 {
8840/** @todo Check the effective address size crap! */
8841 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8842 {
8843 uint16_t u16EffAddr;
8844
8845 /* Handle the disp16 form with no registers first. */
8846 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8847 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8848 else
8849 {
8850 /* Get the displacement. */
8851 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8852 {
8853 case 0: u16EffAddr = 0; break;
8854 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8855 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8856 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8857 }
8858
8859 /* Add the base and index registers to the disp. */
8860 switch (bRm & X86_MODRM_RM_MASK)
8861 {
8862 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8863 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8864 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8865 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8866 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8867 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8868 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8869 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8870 }
8871 }
8872
8873 *pGCPtrEff = u16EffAddr;
8874 }
8875 else
8876 {
8877 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8878 uint32_t u32EffAddr;
8879
8880 /* Handle the disp32 form with no registers first. */
8881 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8882 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8883 else
8884 {
8885 /* Get the register (or SIB) value. */
8886 switch ((bRm & X86_MODRM_RM_MASK))
8887 {
8888 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8889 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8890 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8891 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8892 case 4: /* SIB */
8893 {
8894 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8895
8896 /* Get the index and scale it. */
8897 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8898 {
8899 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8900 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8901 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8902 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8903 case 4: u32EffAddr = 0; /*none */ break;
8904 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8905 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8906 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8907 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8908 }
8909 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8910
8911 /* add base */
8912 switch (bSib & X86_SIB_BASE_MASK)
8913 {
8914 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8915 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8916 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8917 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8918 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8919 case 5:
8920 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8921 {
8922 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8923 SET_SS_DEF();
8924 }
8925 else
8926 {
8927 uint32_t u32Disp;
8928 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8929 u32EffAddr += u32Disp;
8930 }
8931 break;
8932 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8933 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8934 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8935 }
8936 break;
8937 }
8938 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8939 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8940 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8941 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8942 }
8943
8944 /* Get and add the displacement. */
8945 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8946 {
8947 case 0:
8948 break;
8949 case 1:
8950 {
8951 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8952 u32EffAddr += i8Disp;
8953 break;
8954 }
8955 case 2:
8956 {
8957 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8958 u32EffAddr += u32Disp;
8959 break;
8960 }
8961 default:
8962 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8963 }
8964
8965 }
8966 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8967 *pGCPtrEff = u32EffAddr;
8968 else
8969 {
8970 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8971 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8972 }
8973 }
8974 }
8975 else
8976 {
8977 uint64_t u64EffAddr;
8978
8979 /* Handle the rip+disp32 form with no registers first. */
8980 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8981 {
8982 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8983 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8984 }
8985 else
8986 {
8987 /* Get the register (or SIB) value. */
8988 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8989 {
8990 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8991 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8992 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8993 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8994 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8995 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8996 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8997 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8998 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8999 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9000 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9001 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9002 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9003 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9004 /* SIB */
9005 case 4:
9006 case 12:
9007 {
9008 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9009
9010 /* Get the index and scale it. */
9011 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9012 {
9013 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9014 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9015 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9016 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9017 case 4: u64EffAddr = 0; /*none */ break;
9018 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9019 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9020 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9021 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9022 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9023 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9024 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9025 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9026 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9027 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9028 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9029 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9030 }
9031 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9032
9033 /* add base */
9034 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9035 {
9036 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9037 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9038 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9039 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9040 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9041 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9042 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9043 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9044 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9045 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9046 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9047 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9048 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9049 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9050 /* complicated encodings */
9051 case 5:
9052 case 13:
9053 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9054 {
9055 if (!pVCpu->iem.s.uRexB)
9056 {
9057 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9058 SET_SS_DEF();
9059 }
9060 else
9061 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9062 }
9063 else
9064 {
9065 uint32_t u32Disp;
9066 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9067 u64EffAddr += (int32_t)u32Disp;
9068 }
9069 break;
9070 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9071 }
9072 break;
9073 }
9074 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9075 }
9076
9077 /* Get and add the displacement. */
9078 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9079 {
9080 case 0:
9081 break;
9082 case 1:
9083 {
9084 int8_t i8Disp;
9085 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9086 u64EffAddr += i8Disp;
9087 break;
9088 }
9089 case 2:
9090 {
9091 uint32_t u32Disp;
9092 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9093 u64EffAddr += (int32_t)u32Disp;
9094 break;
9095 }
9096 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9097 }
9098
9099 }
9100
9101 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9102 *pGCPtrEff = u64EffAddr;
9103 else
9104 {
9105 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9106 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9107 }
9108 }
9109
9110 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9111 return VINF_SUCCESS;
9112}
9113
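/*
 * Editor's worked example (not part of the original source): how the 16-bit
 * ModR/M path above resolves "mov ax, [bp+si+0x12]" (ModR/M byte 0x42, i.e.
 * mod=1, rm=2), assuming bp=0xfff0 and si=0x0020 in the guest context:
 *
 *      uint16_t u16EffAddr;
 *      // mod=1: fetch the disp8 and sign-extend it to 16 bits.
 *      u16EffAddr = 0x0012;
 *      // rm=2: add bp+si; BP-based forms default to the SS segment.
 *      u16EffAddr += 0xfff0 + 0x0020;
 *      // u16EffAddr == 0x0022: the sum wraps at 16 bits because the
 *      // accumulator is a uint16_t, matching real-mode address arithmetic.
 */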
9114
9115#ifdef IEM_WITH_SETJMP
9116/**
9117 * Calculates the effective address of a ModR/M memory operand.
9118 *
9119 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9120 *
9121 * May longjmp on internal error.
9122 *
9123 * @return The effective address.
9124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9125 * @param bRm The ModRM byte.
9126 * @param cbImmAndRspOffset - First byte: The size of any immediate
9127 * following the effective address opcode bytes
9128 * (only for RIP relative addressing).
9129 * - Second byte: RSP displacement (for POP [ESP]).
9130 */
9131RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9132{
9133 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9134# define SET_SS_DEF() \
9135 do \
9136 { \
9137 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9138 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9139 } while (0)
9140
9141 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9142 {
9143/** @todo Check the effective address size crap! */
9144 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9145 {
9146 uint16_t u16EffAddr;
9147
9148 /* Handle the disp16 form with no registers first. */
9149 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9150 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9151 else
9152 {
9153 /* Get the displacement. */
9154 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9155 {
9156 case 0: u16EffAddr = 0; break;
9157 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9158 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9159 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9160 }
9161
9162 /* Add the base and index registers to the disp. */
9163 switch (bRm & X86_MODRM_RM_MASK)
9164 {
9165 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9166 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9167 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9168 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9169 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9170 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9171 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9172 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9173 }
9174 }
9175
9176 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9177 return u16EffAddr;
9178 }
9179
9180 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9181 uint32_t u32EffAddr;
9182
9183 /* Handle the disp32 form with no registers first. */
9184 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9185 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9186 else
9187 {
9188 /* Get the register (or SIB) value. */
9189 switch ((bRm & X86_MODRM_RM_MASK))
9190 {
9191 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9192 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9193 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9194 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9195 case 4: /* SIB */
9196 {
9197 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9198
9199 /* Get the index and scale it. */
9200 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9201 {
9202 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9203 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9204 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9205 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9206 case 4: u32EffAddr = 0; /*none */ break;
9207 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9208 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9209 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9210 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9211 }
9212 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9213
9214 /* add base */
9215 switch (bSib & X86_SIB_BASE_MASK)
9216 {
9217 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9218 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9219 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9220 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9221 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9222 case 5:
9223 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9224 {
9225 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9226 SET_SS_DEF();
9227 }
9228 else
9229 {
9230 uint32_t u32Disp;
9231 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9232 u32EffAddr += u32Disp;
9233 }
9234 break;
9235 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9236 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9237 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9238 }
9239 break;
9240 }
9241 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9242 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9243 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9244 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9245 }
9246
9247 /* Get and add the displacement. */
9248 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9249 {
9250 case 0:
9251 break;
9252 case 1:
9253 {
9254 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9255 u32EffAddr += i8Disp;
9256 break;
9257 }
9258 case 2:
9259 {
9260 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9261 u32EffAddr += u32Disp;
9262 break;
9263 }
9264 default:
9265 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9266 }
9267 }
9268
9269 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9270 {
9271 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9272 return u32EffAddr;
9273 }
9274 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9275 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9276 return u32EffAddr & UINT16_MAX;
9277 }
9278
9279 uint64_t u64EffAddr;
9280
9281 /* Handle the rip+disp32 form with no registers first. */
9282 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9283 {
9284 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9285 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9286 }
9287 else
9288 {
9289 /* Get the register (or SIB) value. */
9290 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9291 {
9292 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9293 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9294 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9295 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9296 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9297 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9298 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9299 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9300 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9301 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9302 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9303 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9304 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9305 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9306 /* SIB */
9307 case 4:
9308 case 12:
9309 {
9310 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9311
9312 /* Get the index and scale it. */
9313 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9314 {
9315 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9316 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9317 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9318 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9319 case 4: u64EffAddr = 0; /*none */ break;
9320 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9321 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9322 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9323 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9324 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9325 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9326 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9327 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9328 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9329 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9330 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9331 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9332 }
9333 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9334
9335 /* add base */
9336 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9337 {
9338 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9339 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9340 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9341 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9342 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9343 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9344 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9345 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9346 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9347 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9348 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9349 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9350 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9351 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9352 /* complicated encodings */
9353 case 5:
9354 case 13:
9355 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9356 {
9357 if (!pVCpu->iem.s.uRexB)
9358 {
9359 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9360 SET_SS_DEF();
9361 }
9362 else
9363 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9364 }
9365 else
9366 {
9367 uint32_t u32Disp;
9368 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9369 u64EffAddr += (int32_t)u32Disp;
9370 }
9371 break;
9372 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9373 }
9374 break;
9375 }
9376 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9377 }
9378
9379 /* Get and add the displacement. */
9380 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9381 {
9382 case 0:
9383 break;
9384 case 1:
9385 {
9386 int8_t i8Disp;
9387 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9388 u64EffAddr += i8Disp;
9389 break;
9390 }
9391 case 2:
9392 {
9393 uint32_t u32Disp;
9394 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9395 u64EffAddr += (int32_t)u32Disp;
9396 break;
9397 }
9398 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9399 }
9400
9401 }
9402
9403 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9404 {
9405 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9406 return u64EffAddr;
9407 }
9408 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9409 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9410 return u64EffAddr & UINT32_MAX;
9411}
9412#endif /* IEM_WITH_SETJMP */
9413
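/*
 * Editor's worked example (not part of the original source): the 32-bit SIB
 * path in the helpers above, for "mov eax, [ebx+esi*4+0x10]" (ModR/M 0x44,
 * SIB 0xb3, disp8 0x10), assuming ebx=0x1000 and esi=0x0020:
 *
 *      uint32_t u32EffAddr;
 *      u32EffAddr   = 0x0020;          // index = esi (SIB bits 5:3 = 6)
 *      u32EffAddr <<= 2;               // scale = 4   (SIB bits 7:6 = 2)
 *      u32EffAddr  += 0x1000;          // base  = ebx (SIB bits 2:0 = 3)
 *      u32EffAddr  += 0x10;            // mod=1: disp8
 *      // u32EffAddr == 0x1090
 */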
9414
9415/**
9416 * Calculates the effective address of a ModR/M memory operand, extended version
9417 * for use in the recompilers.
9418 *
9419 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9420 *
9421 * @return Strict VBox status code.
9422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9423 * @param bRm The ModRM byte.
9424 * @param cbImmAndRspOffset - First byte: The size of any immediate
9425 * following the effective address opcode bytes
9426 * (only for RIP relative addressing).
9427 * - Second byte: RSP displacement (for POP [ESP]).
9428 * @param pGCPtrEff Where to return the effective address.
9429 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9430 * SIB byte (bits 39:32).
9431 */
9432VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9433{
9434 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
9435# define SET_SS_DEF() \
9436 do \
9437 { \
9438 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9439 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9440 } while (0)
9441
9442 uint64_t uInfo;
9443 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9444 {
9445/** @todo Check the effective address size crap! */
9446 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9447 {
9448 uint16_t u16EffAddr;
9449
9450 /* Handle the disp16 form with no registers first. */
9451 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9452 {
9453 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9454 uInfo = u16EffAddr;
9455 }
9456 else
9457 {
9458 /* Get the displacement. */
9459 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9460 {
9461 case 0: u16EffAddr = 0; break;
9462 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9463 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9464 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9465 }
9466 uInfo = u16EffAddr;
9467
9468 /* Add the base and index registers to the disp. */
9469 switch (bRm & X86_MODRM_RM_MASK)
9470 {
9471 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9472 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9473 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9474 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9475 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9476 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9477 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9478 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9479 }
9480 }
9481
9482 *pGCPtrEff = u16EffAddr;
9483 }
9484 else
9485 {
9486 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9487 uint32_t u32EffAddr;
9488
9489 /* Handle the disp32 form with no registers first. */
9490 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9491 {
9492 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9493 uInfo = u32EffAddr;
9494 }
9495 else
9496 {
9497 /* Get the register (or SIB) value. */
9498 uInfo = 0;
9499 switch ((bRm & X86_MODRM_RM_MASK))
9500 {
9501 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9502 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9503 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9504 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9505 case 4: /* SIB */
9506 {
9507 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9508 uInfo = (uint64_t)bSib << 32;
9509
9510 /* Get the index and scale it. */
9511 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9512 {
9513 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9514 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9515 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9516 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9517 case 4: u32EffAddr = 0; /*none */ break;
9518 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9519 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9520 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9521 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9522 }
9523 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9524
9525 /* add base */
9526 switch (bSib & X86_SIB_BASE_MASK)
9527 {
9528 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9529 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9530 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9531 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9532 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9533 case 5:
9534 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9535 {
9536 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9537 SET_SS_DEF();
9538 }
9539 else
9540 {
9541 uint32_t u32Disp;
9542 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9543 u32EffAddr += u32Disp;
9544 uInfo |= u32Disp;
9545 }
9546 break;
9547 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9548 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9549 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9550 }
9551 break;
9552 }
9553 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9554 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9555 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9556 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9557 }
9558
9559 /* Get and add the displacement. */
9560 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9561 {
9562 case 0:
9563 break;
9564 case 1:
9565 {
9566 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9567 u32EffAddr += i8Disp;
9568 uInfo |= (uint32_t)(int32_t)i8Disp;
9569 break;
9570 }
9571 case 2:
9572 {
9573 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9574 u32EffAddr += u32Disp;
9575 uInfo |= (uint32_t)u32Disp;
9576 break;
9577 }
9578 default:
9579 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9580 }
9581
9582 }
9583 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9584 *pGCPtrEff = u32EffAddr;
9585 else
9586 {
9587 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9588 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9589 }
9590 }
9591 }
9592 else
9593 {
9594 uint64_t u64EffAddr;
9595
9596 /* Handle the rip+disp32 form with no registers first. */
9597 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9598 {
9599 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9600 uInfo = (uint32_t)u64EffAddr;
9601 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9602 }
9603 else
9604 {
9605 /* Get the register (or SIB) value. */
9606 uInfo = 0;
9607 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9608 {
9609 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9610 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9611 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9612 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9613 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9614 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9615 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9616 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9617 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9618 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9619 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9620 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9621 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9622 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9623 /* SIB */
9624 case 4:
9625 case 12:
9626 {
9627 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9628 uInfo = (uint64_t)bSib << 32;
9629
9630 /* Get the index and scale it. */
9631 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9632 {
9633 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9634 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9635 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9636 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9637 case 4: u64EffAddr = 0; /*none */ break;
9638 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9639 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9640 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9641 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9642 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9643 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9644 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9645 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9646 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9647 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9648 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9649 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9650 }
9651 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9652
9653 /* add base */
9654 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9655 {
9656 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9657 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9658 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9659 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9660 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9661 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9662 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9663 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9664 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9665 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9666 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9667 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9668 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9669 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9670 /* complicated encodings */
9671 case 5:
9672 case 13:
9673 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9674 {
9675 if (!pVCpu->iem.s.uRexB)
9676 {
9677 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9678 SET_SS_DEF();
9679 }
9680 else
9681 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9682 }
9683 else
9684 {
9685 uint32_t u32Disp;
9686 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9687 u64EffAddr += (int32_t)u32Disp;
9688 uInfo |= u32Disp;
9689 }
9690 break;
9691 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9692 }
9693 break;
9694 }
9695 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9696 }
9697
9698 /* Get and add the displacement. */
9699 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9700 {
9701 case 0:
9702 break;
9703 case 1:
9704 {
9705 int8_t i8Disp;
9706 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9707 u64EffAddr += i8Disp;
9708 uInfo |= (uint32_t)(int32_t)i8Disp;
9709 break;
9710 }
9711 case 2:
9712 {
9713 uint32_t u32Disp;
9714 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9715 u64EffAddr += (int32_t)u32Disp;
9716 uInfo |= u32Disp;
9717 break;
9718 }
9719 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9720 }
9721
9722 }
9723
9724 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9725 *pGCPtrEff = u64EffAddr;
9726 else
9727 {
9728 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9729 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9730 }
9731 }
9732 *puInfo = uInfo;
9733
9734 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9735 return VINF_SUCCESS;
9736}
9737
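/*
 * Editor's sketch (not part of the original source): how a consumer of the
 * extended helper above - a recompiler, hypothetically - might unpack the
 * uInfo value, given the documented layout (32-bit displacement in bits 31:0,
 * SIB byte in bits 39:32):
 *
 *      uint32_t const u32Disp = (uint32_t)uInfo;
 *      uint8_t  const bSib    = (uint8_t)(uInfo >> 32);
 */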
9738
9739#ifdef IEM_WITH_SETJMP
9740/**
9741 * Calculates the effective address of a ModR/M memory operand, extended version
9742 * for use in the recompilers.
9743 *
9744 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9745 *
9746 * May longjmp on internal error.
9747 *
9748 * @return The effective address.
9749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9750 * @param bRm The ModRM byte.
9751 * @param cbImmAndRspOffset - First byte: The size of any immediate
9752 * following the effective address opcode bytes
9753 * (only for RIP relative addressing).
9754 * - Second byte: RSP displacement (for POP [ESP]).
9755 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9756 * SIB byte (bits 39:32).
9757 */
9758RTGCPTR iemOpHlpCalcRmEffAddrJmpEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, uint64_t *puInfo) IEM_NOEXCEPT_MAY_LONGJMP
9759{
9760 Log5(("iemOpHlpCalcRmEffAddrJmpEx: bRm=%#x\n", bRm));
9761# define SET_SS_DEF() \
9762 do \
9763 { \
9764 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9765 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9766 } while (0)
9767
9768 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9769 {
9770/** @todo Check the effective address size crap! */
9771 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9772 {
9773 uint16_t u16EffAddr;
9774
9775 /* Handle the disp16 form with no registers first. */
9776 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9777 {
9778 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9779 *puInfo = u16EffAddr;
9780 }
9781 else
9782 {
9783 /* Get the displacement. */
9784 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9785 {
9786 case 0: u16EffAddr = 0; break;
9787 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9788 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9789 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9790 }
9791 *puInfo = u16EffAddr;
9792
9793 /* Add the base and index registers to the disp. */
9794 switch (bRm & X86_MODRM_RM_MASK)
9795 {
9796 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9797 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9798 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9799 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9800 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9801 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9802 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9803 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9804 }
9805 }
9806
9807 Log5(("iemOpHlpCalcRmEffAddrJmpEx: EffAddr=%#06RX16 uInfo=%#RX64\n", u16EffAddr, *puInfo));
9808 return u16EffAddr;
9809 }
9810
9811 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9812 uint32_t u32EffAddr;
9813 uint64_t uInfo;
9814
9815 /* Handle the disp32 form with no registers first. */
9816 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9817 {
9818 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9819 uInfo = u32EffAddr;
9820 }
9821 else
9822 {
9823 /* Get the register (or SIB) value. */
9824 uInfo = 0;
9825 switch ((bRm & X86_MODRM_RM_MASK))
9826 {
9827 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9828 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9829 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9830 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9831 case 4: /* SIB */
9832 {
9833 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9834 uInfo = (uint64_t)bSib << 32;
9835
9836 /* Get the index and scale it. */
9837 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9838 {
9839 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9840 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9841 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9842 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9843 case 4: u32EffAddr = 0; /*none */ break;
9844 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9845 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9846 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9847 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9848 }
9849 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9850
9851 /* add base */
9852 switch (bSib & X86_SIB_BASE_MASK)
9853 {
9854 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9855 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9856 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9857 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9858 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9859 case 5:
9860 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9861 {
9862 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9863 SET_SS_DEF();
9864 }
9865 else
9866 {
9867 uint32_t u32Disp;
9868 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9869 u32EffAddr += u32Disp;
9870 uInfo |= u32Disp;
9871 }
9872 break;
9873 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9874 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9875 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9876 }
9877 break;
9878 }
9879 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9880 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9881 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9882 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9883 }
9884
9885 /* Get and add the displacement. */
9886 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9887 {
9888 case 0:
9889 break;
9890 case 1:
9891 {
9892 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9893 u32EffAddr += i8Disp;
9894 uInfo |= (uint32_t)(int32_t)i8Disp;
9895 break;
9896 }
9897 case 2:
9898 {
9899 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9900 u32EffAddr += u32Disp;
9901 uInfo |= u32Disp;
9902 break;
9903 }
9904 default:
9905 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9906 }
9907 }
9908
9909 *puInfo = uInfo;
9910 Log5(("iemOpHlpCalcRmEffAddrJmpEx: EffAddr=%#010RX32 uInfo=%#RX64\n", u32EffAddr, uInfo));
9911 return u32EffAddr;
9912 }
9913
9914 uint64_t u64EffAddr;
9915 uint64_t uInfo;
9916
9917 /* Handle the rip+disp32 form with no registers first. */
9918 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9919 {
9920 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9921 uInfo = (uint32_t)u64EffAddr;
9922 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9923 }
9924 else
9925 {
9926 /* Get the register (or SIB) value. */
9927 uInfo = 0;
9928 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9929 {
9930 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9931 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9932 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9933 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9934 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9935 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9936 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9937 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9938 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9939 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9940 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9941 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9942 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9943 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9944 /* SIB */
9945 case 4:
9946 case 12:
9947 {
9948 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9949 uInfo = (uint64_t)bSib << 32;
9950
9951 /* Get the index and scale it. */
9952 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9953 {
9954 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9955 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9956 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9957 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9958 case 4: u64EffAddr = 0; /*none */ break;
9959 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9960 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9961 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9962 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9963 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9964 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9965 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9966 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9967 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9968 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9969 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9970 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9971 }
9972 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9973
9974 /* add base */
9975 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9976 {
9977 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9978 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9979 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9980 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9981 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9982 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9983 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9984 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9985 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9986 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9987 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9988 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9989 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9990 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9991 /* complicated encodings */
9992 case 5:
9993 case 13:
9994 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9995 {
9996 if (!pVCpu->iem.s.uRexB)
9997 {
9998 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9999 SET_SS_DEF();
10000 }
10001 else
10002 u64EffAddr += pVCpu->cpum.GstCtx.r13;
10003 }
10004 else
10005 {
10006 uint32_t u32Disp;
10007 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
10008 u64EffAddr += (int32_t)u32Disp;
10009 uInfo |= u32Disp;
10010 }
10011 break;
10012 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
10013 }
10014 break;
10015 }
10016 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
10017 }
10018
10019 /* Get and add the displacement. */
10020 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
10021 {
10022 case 0:
10023 break;
10024 case 1:
10025 {
10026 int8_t i8Disp;
10027 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
10028 u64EffAddr += i8Disp;
10029 uInfo |= (uint32_t)(int32_t)i8Disp;
10030 break;
10031 }
10032 case 2:
10033 {
10034 uint32_t u32Disp;
10035 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
10036 u64EffAddr += (int32_t)u32Disp;
10037 uInfo |= u32Disp;
10038 break;
10039 }
10040 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
10041 }
10042
10043 }
10044
10045 *puInfo = uInfo;
10046 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
10047 {
10048 Log5(("iemOpHlpCalcRmEffAddrJmpEx: EffAddr=%#010RGv uInfo=%#RX64\n", u64EffAddr, uInfo));
10049 return u64EffAddr;
10050 }
10051 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
10052 Log5(("iemOpHlpCalcRmEffAddrJmpEx: EffAddr=%#010RGv uInfo=%#RX64\n", u64EffAddr & UINT32_MAX, uInfo));
10053 return u64EffAddr & UINT32_MAX;
10054}
10055#endif /* IEM_WITH_SETJMP */
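/*
 * Editor's note and sketch (an inference about intent, not from the original
 * source): a RIP-relative disp32 is relative to the address of the next
 * instruction.  At the point where the helpers above compute the address, any
 * immediate operand following the displacement has not been fetched yet, which
 * appears to be what the low byte of cbImmAndRspOffset compensates for; the
 * second byte only biases xSP-based addressing (the POP [xSP] case).  A
 * hypothetical caller decoding "cmp dword [rip+0x100], 0x11223344" (a 4 byte
 * immediate follows the displacement) might thus pass:
 *
 *      uint32_t const cbImmAndRspOffset = 4 | (0 << 8);    // low byte: cbImm, 2nd byte: RSP bias
 */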
10056
10057/** @} */
10058
10059
10060#ifdef LOG_ENABLED
10061/**
10062 * Logs the current instruction.
10063 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10064 * @param fSameCtx Set if we have the same context information as the VMM,
10065 * clear if we may have already executed an instruction in
10066 * our debug context. When clear, we assume IEMCPU holds
10067 * valid CPU mode info.
10070 * @param pszFunction The IEM function doing the execution.
10071 */
10072static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
10073{
10074# ifdef IN_RING3
10075 if (LogIs2Enabled())
10076 {
10077 char szInstr[256];
10078 uint32_t cbInstr = 0;
10079 if (fSameCtx)
10080 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10081 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10082 szInstr, sizeof(szInstr), &cbInstr);
10083 else
10084 {
10085 uint32_t fFlags = 0;
10086 switch (pVCpu->iem.s.enmCpuMode)
10087 {
10088 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10089 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10090 case IEMMODE_16BIT:
10091 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
10092 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10093 else
10094 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10095 break;
10096 }
10097 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
10098 szInstr, sizeof(szInstr), &cbInstr);
10099 }
10100
10101 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
10102 Log2(("**** %s\n"
10103 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10104 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10105 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10106 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10107 " %s\n"
10108 , pszFunction,
10109 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
10110 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
10111 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
10112 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
10113 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10114 szInstr));
10115
10116 if (LogIs3Enabled())
10117 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
10118 }
10119 else
10120# endif
10121 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
10122 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
10123 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
10124}
10125#endif /* LOG_ENABLED */
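/*
 * Editor's note (an assumption about tooling, not from the original source;
 * double-check the VirtualBox logging documentation): the Log2/Log3 output
 * above belongs to the IEM debug log group, so at run time it is typically
 * switched on through the VBOX_LOG environment variable (something along the
 * lines of VBOX_LOG="iem.e.l2.l3"), with VBOX_LOG_DEST selecting where the
 * log goes.
 */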
10126
10127
10128#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10129/**
10130 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
10131 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
10132 *
10133 * @returns Modified rcStrict.
10134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10135 * @param rcStrict The instruction execution status.
10136 */
10137static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
10138{
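    /* Editor's summary (derived from the checks below, not an original
       comment): the effective priority order is APIC-write emulation, then
       MTF, then the VMX-preemption timer, then NMI-window and finally
       interrupt-window VM-exits. */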
10139 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
10140 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
10141 {
10142 /* VMX preemption timer takes priority over NMI-window exits. */
10143 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
10144 {
10145 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
10146 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
10147 }
10148 /*
10149 * Check remaining intercepts.
10150 *
10151 * NMI-window and Interrupt-window VM-exits.
10152 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
10153 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
10154 *
10155 * See Intel spec. 26.7.6 "NMI-Window Exiting".
10156 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
10157 */
10158 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
10159 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10160 && !TRPMHasTrap(pVCpu))
10161 {
10162 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
10163 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
10164 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
10165 {
10166 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
10167 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
10168 }
10169 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
10170 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
10171 {
10172 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
10173 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
10174 }
10175 }
10176 }
10177 /* TPR-below threshold/APIC write has the highest priority. */
10178 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
10179 {
10180 rcStrict = iemVmxApicWriteEmulation(pVCpu);
10181 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
10182 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
10183 }
10184 /* MTF takes priority over VMX-preemption timer. */
10185 else
10186 {
10187 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
10188 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
10189 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
10190 }
10191 return rcStrict;
10192}
10193#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10194
10195
10196/**
10197 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10198 * IEMExecOneWithPrefetchedByPC.
10199 *
10200 * Similar code is found in IEMExecLots.
10201 *
10202 * @return Strict VBox status code.
10203 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10204 * @param fExecuteInhibit If set, execute the instruction following CLI,
10205 * POP SS and MOV SS,GR.
10206 * @param pszFunction The calling function name.
10207 */
10208DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
10209{
10210 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10211 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10212 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10213 RT_NOREF_PV(pszFunction);
10214
10215#ifdef IEM_WITH_SETJMP
10216 VBOXSTRICTRC rcStrict;
10217 IEM_TRY_SETJMP(pVCpu, rcStrict)
10218 {
10219 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10220 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10221 }
10222 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10223 {
10224 pVCpu->iem.s.cLongJumps++;
10225 }
10226 IEM_CATCH_LONGJMP_END(pVCpu);
10227#else
10228 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10229 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10230#endif
10231 if (rcStrict == VINF_SUCCESS)
10232 pVCpu->iem.s.cInstructions++;
10233 if (pVCpu->iem.s.cActiveMappings > 0)
10234 {
10235 Assert(rcStrict != VINF_SUCCESS);
10236 iemMemRollback(pVCpu);
10237 }
10238 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10239 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10240 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10241
10242//#ifdef DEBUG
10243// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
10244//#endif
10245
10246#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10247 /*
10248 * Perform any VMX nested-guest instruction boundary actions.
10249 *
10250 * If any of these causes a VM-exit, we must skip executing the next
10251 * instruction (would run into stale page tables). A VM-exit makes sure
10252 * there is no interrupt-inhibition, so that should ensure we don't go on
10253 * to try executing the next instruction. Clearing fExecuteInhibit is
10254 * problematic because of the setjmp/longjmp clobbering above.
10255 */
10256 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10257 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
10258 || rcStrict != VINF_SUCCESS)
10259 { /* likely */ }
10260 else
10261 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10262#endif
10263
10264 /* Execute the next instruction as well if a cli, pop ss or
10265 mov ss, Gr has just completed successfully. */
10266 if ( fExecuteInhibit
10267 && rcStrict == VINF_SUCCESS
10268 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
10269 {
10270 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
10271 if (rcStrict == VINF_SUCCESS)
10272 {
10273#ifdef LOG_ENABLED
10274 iemLogCurInstr(pVCpu, false, pszFunction);
10275#endif
10276#ifdef IEM_WITH_SETJMP
10277 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
10278 {
10279 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10280 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10281 }
10282 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10283 {
10284 pVCpu->iem.s.cLongJumps++;
10285 }
10286 IEM_CATCH_LONGJMP_END(pVCpu);
10287#else
10288 IEM_OPCODE_GET_FIRST_U8(&b);
10289 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10290#endif
10291 if (rcStrict == VINF_SUCCESS)
10292 {
10293 pVCpu->iem.s.cInstructions++;
10294#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10295 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10296 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
10297 { /* likely */ }
10298 else
10299 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10300#endif
10301 }
10302 if (pVCpu->iem.s.cActiveMappings > 0)
10303 {
10304 Assert(rcStrict != VINF_SUCCESS);
10305 iemMemRollback(pVCpu);
10306 }
10307 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10308 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10309 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10310 }
10311 else if (pVCpu->iem.s.cActiveMappings > 0)
10312 iemMemRollback(pVCpu);
10313 /** @todo drop this after we bake this change into RIP advancing. */
10314 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10315 }
10316
10317 /*
10318 * Return value fiddling, statistics and sanity assertions.
10319 */
10320 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10321
10322 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10323 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10324 return rcStrict;
10325}
10326
10327
10328/**
10329 * Execute one instruction.
10330 *
10331 * @return Strict VBox status code.
10332 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10333 */
10334VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10335{
10336 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10337#ifdef LOG_ENABLED
10338 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10339#endif
10340
10341 /*
10342 * Do the decoding and emulation.
10343 */
10344 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10345 if (rcStrict == VINF_SUCCESS)
10346 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10347 else if (pVCpu->iem.s.cActiveMappings > 0)
10348 iemMemRollback(pVCpu);
10349
10350 if (rcStrict != VINF_SUCCESS)
10351 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10352 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10353 return rcStrict;
10354}
10355
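/*
 * Editor's sketch (not part of the original source): a minimal, hypothetical
 * caller of IEMExecOne from an EM-style loop that already owns pVCpu, simply
 * propagating the strict status code:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    // may be an informational status, not only an error
 */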
10356
10357VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10358{
10359 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10360 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10361 if (rcStrict == VINF_SUCCESS)
10362 {
10363 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10364 if (pcbWritten)
10365 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10366 }
10367 else if (pVCpu->iem.s.cActiveMappings > 0)
10368 iemMemRollback(pVCpu);
10369
10370 return rcStrict;
10371}
10372
10373
10374VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10375 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10376{
10377 VBOXSTRICTRC rcStrict;
10378 if ( cbOpcodeBytes
10379 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10380 {
10381 iemInitDecoder(pVCpu, false, false);
10382#ifdef IEM_WITH_CODE_TLB
10383 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10384 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10385 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10386 pVCpu->iem.s.offCurInstrStart = 0;
10387 pVCpu->iem.s.offInstrNextByte = 0;
10388 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10389#else
10390 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10391 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10392#endif
10393 rcStrict = VINF_SUCCESS;
10394 }
10395 else
10396 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10397 if (rcStrict == VINF_SUCCESS)
10398 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10399 else if (pVCpu->iem.s.cActiveMappings > 0)
10400 iemMemRollback(pVCpu);
10401
10402 return rcStrict;
10403}
10404
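/*
 * Editor's sketch (not part of the original source): a hypothetical use of
 * IEMExecOneWithPrefetchedByPC where the caller already holds the opcode
 * bytes at the current RIP (for instance from an exit record) and wants IEM
 * to reuse them instead of re-reading guest memory:
 *
 *      static const uint8_t s_abBytes[] = { 0x0f, 0x0b };     // ud2
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
 *                                                           s_abBytes, sizeof(s_abBytes));
 */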
10405
10406VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10407{
10408 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10409 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
10410 if (rcStrict == VINF_SUCCESS)
10411 {
10412 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10413 if (pcbWritten)
10414 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10415 }
10416 else if (pVCpu->iem.s.cActiveMappings > 0)
10417 iemMemRollback(pVCpu);
10418
10419 return rcStrict;
10420}
10421
10422
10423VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10424 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10425{
10426 VBOXSTRICTRC rcStrict;
10427 if ( cbOpcodeBytes
10428 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10429 {
10430 iemInitDecoder(pVCpu, true, false);
10431#ifdef IEM_WITH_CODE_TLB
10432 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10433 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10434 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10435 pVCpu->iem.s.offCurInstrStart = 0;
10436 pVCpu->iem.s.offInstrNextByte = 0;
10437 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10438#else
10439 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10440 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10441#endif
10442 rcStrict = VINF_SUCCESS;
10443 }
10444 else
10445 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
10446 if (rcStrict == VINF_SUCCESS)
10447 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10448 else if (pVCpu->iem.s.cActiveMappings > 0)
10449 iemMemRollback(pVCpu);
10450
10451 return rcStrict;
10452}
10453
10454
10455/**
10456 * For handling split cacheline lock operations when the host has split-lock
10457 * detection enabled.
10458 *
10459 * This will cause the interpreter to disregard the lock prefix and implicit
10460 * locking (xchg).
10461 *
10462 * @returns Strict VBox status code.
10463 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10464 */
10465VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10466{
10467 /*
10468 * Do the decoding and emulation.
10469 */
10470 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
10471 if (rcStrict == VINF_SUCCESS)
10472 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10473 else if (pVCpu->iem.s.cActiveMappings > 0)
10474 iemMemRollback(pVCpu);
10475
10476 if (rcStrict != VINF_SUCCESS)
10477 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10478 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10479 return rcStrict;
10480}
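
/*
 * Illustrative sketch, not part of the build: how a caller might fall back to
 * IEMExecOneIgnoreLock when the host's split-lock detection turns a guest
 * split-cacheline lock into a host alignment check.  The function name and the
 * surrounding decision logic are assumptions made for illustration; only the
 * IEMExecOneIgnoreLock call itself is the API documented above.
 */
#if 0
static VBOXSTRICTRC vmmExampleEmulateSplitLockInstr(PVMCPUCC pVCpu)
{
    /* Re-run the offending instruction with the lock prefix disregarded. */
    return IEMExecOneIgnoreLock(pVCpu);
}
#endif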
10481
10482
10483VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10484{
10485 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10486 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10487 Assert(cMaxInstructions > 0);
10488
10489 /*
10490 * See if there is an interrupt pending in TRPM, inject it if we can.
10491 */
10492 /** @todo What if we are injecting an exception and not an interrupt? Is that
10493 * possible here? For now we assert it is indeed only an interrupt. */
10494 if (!TRPMHasTrap(pVCpu))
10495 { /* likely */ }
10496 else
10497 {
10498 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10499 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10500 {
10501 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10502#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10503 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10504 if (fIntrEnabled)
10505 {
10506 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10507 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10508 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10509 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10510 else
10511 {
10512 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10513 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10514 }
10515 }
10516#else
10517 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10518#endif
10519 if (fIntrEnabled)
10520 {
10521 uint8_t u8TrapNo;
10522 TRPMEVENT enmType;
10523 uint32_t uErrCode;
10524 RTGCPTR uCr2;
10525 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10526 AssertRC(rc2);
10527 Assert(enmType == TRPM_HARDWARE_INT);
10528 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10529
10530 TRPMResetTrap(pVCpu);
10531
10532#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10533 /* Injecting an event may cause a VM-exit. */
10534 if ( rcStrict != VINF_SUCCESS
10535 && rcStrict != VINF_IEM_RAISED_XCPT)
10536 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10537#else
10538 NOREF(rcStrict);
10539#endif
10540 }
10541 }
10542 }
10543
10544 /*
10545 * Initial decoder init w/ prefetch, then setup setjmp.
10546 */
10547 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10548 if (rcStrict == VINF_SUCCESS)
10549 {
10550#ifdef IEM_WITH_SETJMP
10551 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10552 IEM_TRY_SETJMP(pVCpu, rcStrict)
10553#endif
10554 {
10555 /*
10556             * The run loop.  We limit ourselves to the caller-specified maximum instruction count.
10557 */
10558 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10559 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10560 for (;;)
10561 {
10562 /*
10563 * Log the state.
10564 */
10565#ifdef LOG_ENABLED
10566 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10567#endif
10568
10569 /*
10570 * Do the decoding and emulation.
10571 */
10572 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10573 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10574#ifdef VBOX_STRICT
10575 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10576#endif
10577 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10578 {
10579 Assert(pVCpu->iem.s.cActiveMappings == 0);
10580 pVCpu->iem.s.cInstructions++;
10581
10582#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10583 /* Perform any VMX nested-guest instruction boundary actions. */
10584 uint64_t fCpu = pVCpu->fLocalForcedActions;
10585 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10586 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10587 { /* likely */ }
10588 else
10589 {
10590 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10591 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10592 fCpu = pVCpu->fLocalForcedActions;
10593 else
10594 {
10595 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10596 break;
10597 }
10598 }
10599#endif
10600 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10601 {
10602#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10603 uint64_t fCpu = pVCpu->fLocalForcedActions;
10604#endif
10605 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10606 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10607 | VMCPU_FF_TLB_FLUSH
10608 | VMCPU_FF_UNHALT );
10609
10610 if (RT_LIKELY( ( !fCpu
10611 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10612 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10613 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10614 {
10615 if (--cMaxInstructionsGccStupidity > 0)
10616 {
10617                         /* Poll timers every now and then according to the caller's specs. */
10618 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10619 || !TMTimerPollBool(pVM, pVCpu))
10620 {
10621 Assert(pVCpu->iem.s.cActiveMappings == 0);
10622 iemReInitDecoder(pVCpu);
10623 continue;
10624 }
10625 }
10626 }
10627 }
10628 Assert(pVCpu->iem.s.cActiveMappings == 0);
10629 }
10630 else if (pVCpu->iem.s.cActiveMappings > 0)
10631 iemMemRollback(pVCpu);
10632 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10633 break;
10634 }
10635 }
10636#ifdef IEM_WITH_SETJMP
10637 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10638 {
10639 if (pVCpu->iem.s.cActiveMappings > 0)
10640 iemMemRollback(pVCpu);
10641# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10642 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10643# endif
10644 pVCpu->iem.s.cLongJumps++;
10645 }
10646 IEM_CATCH_LONGJMP_END(pVCpu);
10647#endif
10648
10649 /*
10650 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10651 */
10652 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10653 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10654 }
10655 else
10656 {
10657 if (pVCpu->iem.s.cActiveMappings > 0)
10658 iemMemRollback(pVCpu);
10659
10660#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10661 /*
10662 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10663 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10664 */
10665 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10666#endif
10667 }
10668
10669 /*
10670 * Maybe re-enter raw-mode and log.
10671 */
10672 if (rcStrict != VINF_SUCCESS)
10673 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10674 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10675 if (pcInstructions)
10676 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10677 return rcStrict;
10678}
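
/*
 * Illustrative sketch, not part of the build: a minimal IEMExecLots call.
 * The assertion at the top of the function requires cPollRate + 1 to be a
 * power of two, i.e. cPollRate must be a mask such as 0x1ff.  The limits used
 * here are placeholders, not the values the real callers pass.
 */
#if 0
static VBOXSTRICTRC vmmExampleRunABunchOfInstructions(PVMCPUCC pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 0x1ff /*cPollRate*/, &cInstructions);
    LogFlow(("Interpreted %u instructions -> %Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif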
10679
10680
10681/**
10682 * Interface used by EMExecuteExec, does exit statistics and limits.
10683 *
10684 * @returns Strict VBox status code.
10685 * @param pVCpu The cross context virtual CPU structure.
10686 * @param fWillExit To be defined.
10687 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10688 * @param cMaxInstructions Maximum number of instructions to execute.
10689 * @param cMaxInstructionsWithoutExits
10690 * The max number of instructions without exits.
10691 * @param pStats Where to return statistics.
10692 */
10693VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10694 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10695{
10696 NOREF(fWillExit); /** @todo define flexible exit crits */
10697
10698 /*
10699 * Initialize return stats.
10700 */
10701 pStats->cInstructions = 0;
10702 pStats->cExits = 0;
10703 pStats->cMaxExitDistance = 0;
10704 pStats->cReserved = 0;
10705
10706 /*
10707 * Initial decoder init w/ prefetch, then setup setjmp.
10708 */
10709 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10710 if (rcStrict == VINF_SUCCESS)
10711 {
10712#ifdef IEM_WITH_SETJMP
10713 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10714 IEM_TRY_SETJMP(pVCpu, rcStrict)
10715#endif
10716 {
10717#ifdef IN_RING0
10718 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10719#endif
10720 uint32_t cInstructionSinceLastExit = 0;
10721
10722 /*
10723                 * The run loop.  We limit ourselves to the caller-specified maximum instruction count.
10724 */
10725 PVM pVM = pVCpu->CTX_SUFF(pVM);
10726 for (;;)
10727 {
10728 /*
10729 * Log the state.
10730 */
10731#ifdef LOG_ENABLED
10732 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10733#endif
10734
10735 /*
10736 * Do the decoding and emulation.
10737 */
10738 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10739
10740 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10741 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10742
10743 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10744 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10745 {
10746 pStats->cExits += 1;
10747 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10748 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10749 cInstructionSinceLastExit = 0;
10750 }
10751
10752 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10753 {
10754 Assert(pVCpu->iem.s.cActiveMappings == 0);
10755 pVCpu->iem.s.cInstructions++;
10756 pStats->cInstructions++;
10757 cInstructionSinceLastExit++;
10758
10759#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10760 /* Perform any VMX nested-guest instruction boundary actions. */
10761 uint64_t fCpu = pVCpu->fLocalForcedActions;
10762 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10763 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10764 { /* likely */ }
10765 else
10766 {
10767 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10768 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10769 fCpu = pVCpu->fLocalForcedActions;
10770 else
10771 {
10772 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10773 break;
10774 }
10775 }
10776#endif
10777 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10778 {
10779#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10780 uint64_t fCpu = pVCpu->fLocalForcedActions;
10781#endif
10782 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10783 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10784 | VMCPU_FF_TLB_FLUSH
10785 | VMCPU_FF_UNHALT );
10786 if (RT_LIKELY( ( ( !fCpu
10787 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10788 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10789 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10790 || pStats->cInstructions < cMinInstructions))
10791 {
10792 if (pStats->cInstructions < cMaxInstructions)
10793 {
10794 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10795 {
10796#ifdef IN_RING0
10797 if ( !fCheckPreemptionPending
10798 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10799#endif
10800 {
10801 Assert(pVCpu->iem.s.cActiveMappings == 0);
10802 iemReInitDecoder(pVCpu);
10803 continue;
10804 }
10805#ifdef IN_RING0
10806 rcStrict = VINF_EM_RAW_INTERRUPT;
10807 break;
10808#endif
10809 }
10810 }
10811 }
10812 Assert(!(fCpu & VMCPU_FF_IEM));
10813 }
10814 Assert(pVCpu->iem.s.cActiveMappings == 0);
10815 }
10816 else if (pVCpu->iem.s.cActiveMappings > 0)
10817 iemMemRollback(pVCpu);
10818 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10819 break;
10820 }
10821 }
10822#ifdef IEM_WITH_SETJMP
10823 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10824 {
10825 if (pVCpu->iem.s.cActiveMappings > 0)
10826 iemMemRollback(pVCpu);
10827 pVCpu->iem.s.cLongJumps++;
10828 }
10829 IEM_CATCH_LONGJMP_END(pVCpu);
10830#endif
10831
10832 /*
10833 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10834 */
10835 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10836 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10837 }
10838 else
10839 {
10840 if (pVCpu->iem.s.cActiveMappings > 0)
10841 iemMemRollback(pVCpu);
10842
10843#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10844 /*
10845 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10846 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10847 */
10848 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10849#endif
10850 }
10851
10852 /*
10853 * Maybe re-enter raw-mode and log.
10854 */
10855 if (rcStrict != VINF_SUCCESS)
10856 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10857 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10858 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10859 return rcStrict;
10860}
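
/*
 * Illustrative sketch, not part of the build: calling IEMExecForExits and
 * inspecting the returned statistics.  The stats structure type is assumed to
 * be IEMEXECFOREXITSTATS, following the usual P-prefix pointer convention of
 * the PIEMEXECFOREXITSTATS parameter, and the instruction limits below are
 * placeholders; the fields read match the ones initialized at the top of the
 * function.
 */
#if 0
static VBOXSTRICTRC vmmExampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 1 /*cMinInstructions*/, 4096 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("%u instructions, %u exits, max exit distance %u -> %Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif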
10861
10862
10863/**
10864 * Injects a trap, fault, abort, software interrupt or external interrupt.
10865 *
10866 * The parameter list matches TRPMQueryTrapAll pretty closely.
10867 *
10868 * @returns Strict VBox status code.
10869 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10870 * @param u8TrapNo The trap number.
10871 * @param enmType What type is it (trap/fault/abort), software
10872 * interrupt or hardware interrupt.
10873 * @param uErrCode The error code if applicable.
10874 * @param uCr2 The CR2 value if applicable.
10875 * @param cbInstr The instruction length (only relevant for
10876 * software interrupts).
10877 */
10878VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10879 uint8_t cbInstr)
10880{
10881 iemInitDecoder(pVCpu, false, false);
10882#ifdef DBGFTRACE_ENABLED
10883 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10884 u8TrapNo, enmType, uErrCode, uCr2);
10885#endif
10886
10887 uint32_t fFlags;
10888 switch (enmType)
10889 {
10890 case TRPM_HARDWARE_INT:
10891 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10892 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10893 uErrCode = uCr2 = 0;
10894 break;
10895
10896 case TRPM_SOFTWARE_INT:
10897 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10898 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10899 uErrCode = uCr2 = 0;
10900 break;
10901
10902 case TRPM_TRAP:
10903 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10904 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10905 if (u8TrapNo == X86_XCPT_PF)
10906 fFlags |= IEM_XCPT_FLAGS_CR2;
10907 switch (u8TrapNo)
10908 {
10909 case X86_XCPT_DF:
10910 case X86_XCPT_TS:
10911 case X86_XCPT_NP:
10912 case X86_XCPT_SS:
10913 case X86_XCPT_PF:
10914 case X86_XCPT_AC:
10915 case X86_XCPT_GP:
10916 fFlags |= IEM_XCPT_FLAGS_ERR;
10917 break;
10918 }
10919 break;
10920
10921 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10922 }
10923
10924 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10925
10926 if (pVCpu->iem.s.cActiveMappings > 0)
10927 iemMemRollback(pVCpu);
10928
10929 return rcStrict;
10930}
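
/*
 * Illustrative sketch, not part of the build: injecting a page fault through
 * IEMInjectTrap.  Per the TRPM_TRAP case above, #PF carries both an error
 * code and a CR2 value; uPfErrCode and GCPtrFault are placeholder inputs
 * supplied by the hypothetical caller.
 */
#if 0
static VBOXSTRICTRC vmmExampleInjectPageFault(PVMCPUCC pVCpu, uint32_t uPfErrCode, RTGCPTR GCPtrFault)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, (uint16_t)uPfErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif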
10931
10932
10933/**
10934 * Injects the active TRPM event.
10935 *
10936 * @returns Strict VBox status code.
10937 * @param pVCpu The cross context virtual CPU structure.
10938 */
10939VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10940{
10941#ifndef IEM_IMPLEMENTS_TASKSWITCH
10942 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10943#else
10944 uint8_t u8TrapNo;
10945 TRPMEVENT enmType;
10946 uint32_t uErrCode;
10947 RTGCUINTPTR uCr2;
10948 uint8_t cbInstr;
10949 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10950 if (RT_FAILURE(rc))
10951 return rc;
10952
10953 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10954 * ICEBP \#DB injection as a special case. */
10955 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10956#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10957 if (rcStrict == VINF_SVM_VMEXIT)
10958 rcStrict = VINF_SUCCESS;
10959#endif
10960#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10961 if (rcStrict == VINF_VMX_VMEXIT)
10962 rcStrict = VINF_SUCCESS;
10963#endif
10964 /** @todo Are there any other codes that imply the event was successfully
10965 * delivered to the guest? See @bugref{6607}. */
10966 if ( rcStrict == VINF_SUCCESS
10967 || rcStrict == VINF_IEM_RAISED_XCPT)
10968 TRPMResetTrap(pVCpu);
10969
10970 return rcStrict;
10971#endif
10972}
10973
10974
10975VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10976{
10977 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10978 return VERR_NOT_IMPLEMENTED;
10979}
10980
10981
10982VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10983{
10984 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10985 return VERR_NOT_IMPLEMENTED;
10986}
10987
10988
10989/**
10990 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10991 *
10992 * This API ASSUMES that the caller has already verified that the guest code is
10993 * allowed to access the I/O port. (The I/O port is in the DX register in the
10994 * guest state.)
10995 *
10996 * @returns Strict VBox status code.
10997 * @param pVCpu The cross context virtual CPU structure.
10998 * @param cbValue The size of the I/O port access (1, 2, or 4).
10999 * @param enmAddrMode The addressing mode.
11000 * @param fRepPrefix Indicates whether a repeat prefix is used
11001 * (doesn't matter which for this instruction).
11002 * @param cbInstr The instruction length in bytes.
11003 * @param iEffSeg The effective segment register.
11004 * @param fIoChecked Whether the access to the I/O port has been
11005 * checked or not. It's typically checked in the
11006 * HM scenario.
11007 */
11008VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11009 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
11010{
11011 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11012 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11013
11014 /*
11015 * State init.
11016 */
11017 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11018
11019 /*
11020 * Switch orgy for getting to the right handler.
11021 */
11022 VBOXSTRICTRC rcStrict;
11023 if (fRepPrefix)
11024 {
11025 switch (enmAddrMode)
11026 {
11027 case IEMMODE_16BIT:
11028 switch (cbValue)
11029 {
11030 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11031 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11032 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11033 default:
11034 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11035 }
11036 break;
11037
11038 case IEMMODE_32BIT:
11039 switch (cbValue)
11040 {
11041 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11042 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11043 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11044 default:
11045 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11046 }
11047 break;
11048
11049 case IEMMODE_64BIT:
11050 switch (cbValue)
11051 {
11052 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11053 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11054 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11055 default:
11056 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11057 }
11058 break;
11059
11060 default:
11061 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11062 }
11063 }
11064 else
11065 {
11066 switch (enmAddrMode)
11067 {
11068 case IEMMODE_16BIT:
11069 switch (cbValue)
11070 {
11071 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11072 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11073 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11074 default:
11075 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11076 }
11077 break;
11078
11079 case IEMMODE_32BIT:
11080 switch (cbValue)
11081 {
11082 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11083 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11084 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11085 default:
11086 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11087 }
11088 break;
11089
11090 case IEMMODE_64BIT:
11091 switch (cbValue)
11092 {
11093 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11094 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11095 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11096 default:
11097 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11098 }
11099 break;
11100
11101 default:
11102 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11103 }
11104 }
11105
11106 if (pVCpu->iem.s.cActiveMappings)
11107 iemMemRollback(pVCpu);
11108
11109 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11110}
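
/*
 * Illustrative sketch, not part of the build: how a VM-exit handler might
 * forward a "rep outsb" exit to the string I/O writer above.  In the real
 * callers the instruction length, address size and segment come from the exit
 * qualification / decoded instruction; the constants here are placeholders.
 */
#if 0
static VBOXSTRICTRC vmmExampleRepOutsb(PVMCPUCC pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr: REP prefix + OUTSB*/, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif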
11111
11112
11113/**
11114 * Interface for HM and EM for executing string I/O IN (read) instructions.
11115 *
11116 * This API ASSUMES that the caller has already verified that the guest code is
11117 * allowed to access the I/O port. (The I/O port is in the DX register in the
11118 * guest state.)
11119 *
11120 * @returns Strict VBox status code.
11121 * @param pVCpu The cross context virtual CPU structure.
11122 * @param cbValue The size of the I/O port access (1, 2, or 4).
11123 * @param enmAddrMode The addressing mode.
11124 * @param fRepPrefix Indicates whether a repeat prefix is used
11125 * (doesn't matter which for this instruction).
11126 * @param cbInstr The instruction length in bytes.
11127 * @param fIoChecked Whether the access to the I/O port has been
11128 * checked or not. It's typically checked in the
11129 * HM scenario.
11130 */
11131VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11132 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
11133{
11134 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11135
11136 /*
11137 * State init.
11138 */
11139 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11140
11141 /*
11142 * Switch orgy for getting to the right handler.
11143 */
11144 VBOXSTRICTRC rcStrict;
11145 if (fRepPrefix)
11146 {
11147 switch (enmAddrMode)
11148 {
11149 case IEMMODE_16BIT:
11150 switch (cbValue)
11151 {
11152 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
11153 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
11154 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
11155 default:
11156 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11157 }
11158 break;
11159
11160 case IEMMODE_32BIT:
11161 switch (cbValue)
11162 {
11163 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
11164 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
11165 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
11166 default:
11167 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11168 }
11169 break;
11170
11171 case IEMMODE_64BIT:
11172 switch (cbValue)
11173 {
11174 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
11175 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
11176 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
11177 default:
11178 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11179 }
11180 break;
11181
11182 default:
11183 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11184 }
11185 }
11186 else
11187 {
11188 switch (enmAddrMode)
11189 {
11190 case IEMMODE_16BIT:
11191 switch (cbValue)
11192 {
11193 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
11194 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
11195 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
11196 default:
11197 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11198 }
11199 break;
11200
11201 case IEMMODE_32BIT:
11202 switch (cbValue)
11203 {
11204 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
11205 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
11206 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
11207 default:
11208 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11209 }
11210 break;
11211
11212 case IEMMODE_64BIT:
11213 switch (cbValue)
11214 {
11215 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
11216 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
11217 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
11218 default:
11219 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11220 }
11221 break;
11222
11223 default:
11224 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11225 }
11226 }
11227
11228 if ( pVCpu->iem.s.cActiveMappings == 0
11229 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
11230 { /* likely */ }
11231 else
11232 {
11233 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
11234 iemMemRollback(pVCpu);
11235 }
11236 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11237}
11238
11239
11240/**
11241 * Interface for rawmode to execute an OUT instruction.
11242 *
11243 * @returns Strict VBox status code.
11244 * @param pVCpu The cross context virtual CPU structure.
11245 * @param cbInstr The instruction length in bytes.
11246 * @param u16Port The port to write to.
11247 * @param fImm Whether the port is specified using an immediate operand or
11248 * using the implicit DX register.
11249 * @param cbReg The register size.
11250 *
11251 * @remarks In ring-0 not all of the state needs to be synced in.
11252 */
11253VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11254{
11255 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11256 Assert(cbReg <= 4 && cbReg != 3);
11257
11258 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11259 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
11260 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11261 Assert(!pVCpu->iem.s.cActiveMappings);
11262 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11263}
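
/*
 * Illustrative sketch, not part of the build: emulating "out dx, al" (opcode
 * 0xEE, one byte), where the port number comes from DX rather than an
 * immediate, hence fImm = false and cbReg = 1.
 */
#if 0
static VBOXSTRICTRC vmmExampleOutDxAl(PVMCPUCC pVCpu)
{
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, (uint16_t)pVCpu->cpum.GstCtx.rdx, false /*fImm*/, 1 /*cbReg*/);
}
#endif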
11264
11265
11266/**
11267 * Interface for rawmode to execute an IN instruction.
11268 *
11269 * @returns Strict VBox status code.
11270 * @param pVCpu The cross context virtual CPU structure.
11271 * @param cbInstr The instruction length in bytes.
11272 * @param u16Port The port to read.
11273 * @param fImm Whether the port is specified using an immediate operand or
11274 * using the implicit DX.
11275 * @param cbReg The register size.
11276 */
11277VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11278{
11279 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11280 Assert(cbReg <= 4 && cbReg != 3);
11281
11282 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11283 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
11284 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11285 Assert(!pVCpu->iem.s.cActiveMappings);
11286 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11287}
11288
11289
11290/**
11291 * Interface for HM and EM to write to a CRx register.
11292 *
11293 * @returns Strict VBox status code.
11294 * @param pVCpu The cross context virtual CPU structure.
11295 * @param cbInstr The instruction length in bytes.
11296 * @param iCrReg The control register number (destination).
11297 * @param iGReg The general purpose register number (source).
11298 *
11299 * @remarks In ring-0 not all of the state needs to be synced in.
11300 */
11301VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11302{
11303 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11304 Assert(iCrReg < 16);
11305 Assert(iGReg < 16);
11306
11307 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11308 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11309 Assert(!pVCpu->iem.s.cActiveMappings);
11310 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11311}
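
/*
 * Illustrative sketch, not part of the build: emulating "mov cr3, rax" on a
 * CR3-write intercept.  The 0F 22 /r encoding is three bytes here, and both
 * register indices come straight from the decoded ModR/M byte.
 */
#if 0
static VBOXSTRICTRC vmmExampleMovToCr3(PVMCPUCC pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg=CR3*/, X86_GREG_xAX /*iGReg*/);
}
#endif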
11312
11313
11314/**
11315 * Interface for HM and EM to read from a CRx register.
11316 *
11317 * @returns Strict VBox status code.
11318 * @param pVCpu The cross context virtual CPU structure.
11319 * @param cbInstr The instruction length in bytes.
11320 * @param iGReg The general purpose register number (destination).
11321 * @param iCrReg The control register number (source).
11322 *
11323 * @remarks In ring-0 not all of the state needs to be synced in.
11324 */
11325VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11326{
11327 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11328 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11329 | CPUMCTX_EXTRN_APIC_TPR);
11330 Assert(iCrReg < 16);
11331 Assert(iGReg < 16);
11332
11333 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11334 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11335 Assert(!pVCpu->iem.s.cActiveMappings);
11336 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11337}
11338
11339
11340/**
11341 * Interface for HM and EM to write to a DRx register.
11342 *
11343 * @returns Strict VBox status code.
11344 * @param pVCpu The cross context virtual CPU structure.
11345 * @param cbInstr The instruction length in bytes.
11346 * @param iDrReg The debug register number (destination).
11347 * @param iGReg The general purpose register number (source).
11348 *
11349 * @remarks In ring-0 not all of the state needs to be synced in.
11350 */
11351VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11352{
11353 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11354 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11355 Assert(iDrReg < 8);
11356 Assert(iGReg < 16);
11357
11358 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11359 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11360 Assert(!pVCpu->iem.s.cActiveMappings);
11361 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11362}
11363
11364
11365/**
11366 * Interface for HM and EM to read from a DRx register.
11367 *
11368 * @returns Strict VBox status code.
11369 * @param pVCpu The cross context virtual CPU structure.
11370 * @param cbInstr The instruction length in bytes.
11371 * @param iGReg The general purpose register number (destination).
11372 * @param iDrReg The debug register number (source).
11373 *
11374 * @remarks In ring-0 not all of the state needs to be synced in.
11375 */
11376VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11377{
11378 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11379 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11380 Assert(iDrReg < 8);
11381 Assert(iGReg < 16);
11382
11383 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11384 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11385 Assert(!pVCpu->iem.s.cActiveMappings);
11386 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11387}
11388
11389
11390/**
11391 * Interface for HM and EM to clear the CR0[TS] bit.
11392 *
11393 * @returns Strict VBox status code.
11394 * @param pVCpu The cross context virtual CPU structure.
11395 * @param cbInstr The instruction length in bytes.
11396 *
11397 * @remarks In ring-0 not all of the state needs to be synced in.
11398 */
11399VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11400{
11401 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11402
11403 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11404 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11405 Assert(!pVCpu->iem.s.cActiveMappings);
11406 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11407}
11408
11409
11410/**
11411 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11412 *
11413 * @returns Strict VBox status code.
11414 * @param pVCpu The cross context virtual CPU structure.
11415 * @param cbInstr The instruction length in bytes.
11416 * @param uValue The value to load into CR0.
11417 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11418 * memory operand. Otherwise pass NIL_RTGCPTR.
11419 *
11420 * @remarks In ring-0 not all of the state needs to be synced in.
11421 */
11422VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11423{
11424 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11425
11426 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11427 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11428 Assert(!pVCpu->iem.s.cActiveMappings);
11429 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11430}
11431
11432
11433/**
11434 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11435 *
11436 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11437 *
11438 * @returns Strict VBox status code.
11439 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11440 * @param cbInstr The instruction length in bytes.
11441 * @remarks In ring-0 not all of the state needs to be synced in.
11442 * @thread EMT(pVCpu)
11443 */
11444VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11445{
11446 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11447
11448 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11449 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11450 Assert(!pVCpu->iem.s.cActiveMappings);
11451 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11452}
11453
11454
11455/**
11456 * Interface for HM and EM to emulate the WBINVD instruction.
11457 *
11458 * @returns Strict VBox status code.
11459 * @param pVCpu The cross context virtual CPU structure.
11460 * @param cbInstr The instruction length in bytes.
11461 *
11462 * @remarks In ring-0 not all of the state needs to be synced in.
11463 */
11464VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11465{
11466 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11467
11468 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11469 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11470 Assert(!pVCpu->iem.s.cActiveMappings);
11471 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11472}
11473
11474
11475/**
11476 * Interface for HM and EM to emulate the INVD instruction.
11477 *
11478 * @returns Strict VBox status code.
11479 * @param pVCpu The cross context virtual CPU structure.
11480 * @param cbInstr The instruction length in bytes.
11481 *
11482 * @remarks In ring-0 not all of the state needs to be synced in.
11483 */
11484VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11485{
11486 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11487
11488 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11489 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11490 Assert(!pVCpu->iem.s.cActiveMappings);
11491 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11492}
11493
11494
11495/**
11496 * Interface for HM and EM to emulate the INVLPG instruction.
11497 *
11498 * @returns Strict VBox status code.
11499 * @retval VINF_PGM_SYNC_CR3
11500 *
11501 * @param pVCpu The cross context virtual CPU structure.
11502 * @param cbInstr The instruction length in bytes.
11503 * @param GCPtrPage The effective address of the page to invalidate.
11504 *
11505 * @remarks In ring-0 not all of the state needs to be synced in.
11506 */
11507VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11508{
11509 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11510
11511 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11512 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11513 Assert(!pVCpu->iem.s.cActiveMappings);
11514 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11515}
11516
11517
11518/**
11519 * Interface for HM and EM to emulate the INVPCID instruction.
11520 *
11521 * @returns Strict VBox status code.
11522 * @retval VINF_PGM_SYNC_CR3
11523 *
11524 * @param pVCpu The cross context virtual CPU structure.
11525 * @param cbInstr The instruction length in bytes.
11526 * @param iEffSeg The effective segment register.
11527 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11528 * @param uType The invalidation type.
11529 *
11530 * @remarks In ring-0 not all of the state needs to be synced in.
11531 */
11532VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11533 uint64_t uType)
11534{
11535 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11536
11537 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11538 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11539 Assert(!pVCpu->iem.s.cActiveMappings);
11540 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11541}
11542
11543
11544/**
11545 * Interface for HM and EM to emulate the CPUID instruction.
11546 *
11547 * @returns Strict VBox status code.
11548 *
11549 * @param pVCpu The cross context virtual CPU structure.
11550 * @param cbInstr The instruction length in bytes.
11551 *
11552 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
11553 */
11554VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11555{
11556 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11557 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11558
11559 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11560 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11561 Assert(!pVCpu->iem.s.cActiveMappings);
11562 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11563}
11564
11565
11566/**
11567 * Interface for HM and EM to emulate the RDPMC instruction.
11568 *
11569 * @returns Strict VBox status code.
11570 *
11571 * @param pVCpu The cross context virtual CPU structure.
11572 * @param cbInstr The instruction length in bytes.
11573 *
11574 * @remarks Not all of the state needs to be synced in.
11575 */
11576VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11577{
11578 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11579 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11580
11581 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11582 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11583 Assert(!pVCpu->iem.s.cActiveMappings);
11584 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11585}
11586
11587
11588/**
11589 * Interface for HM and EM to emulate the RDTSC instruction.
11590 *
11591 * @returns Strict VBox status code.
11592 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11593 *
11594 * @param pVCpu The cross context virtual CPU structure.
11595 * @param cbInstr The instruction length in bytes.
11596 *
11597 * @remarks Not all of the state needs to be synced in.
11598 */
11599VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11600{
11601 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11602 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11603
11604 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11605 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11606 Assert(!pVCpu->iem.s.cActiveMappings);
11607 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11608}
11609
11610
11611/**
11612 * Interface for HM and EM to emulate the RDTSCP instruction.
11613 *
11614 * @returns Strict VBox status code.
11615 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11616 *
11617 * @param pVCpu The cross context virtual CPU structure.
11618 * @param cbInstr The instruction length in bytes.
11619 *
11620 * @remarks Not all of the state needs to be synced in. Recommended
11621 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11622 */
11623VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11624{
11625 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11626 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11627
11628 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11629 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11630 Assert(!pVCpu->iem.s.cActiveMappings);
11631 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11632}
11633
11634
11635/**
11636 * Interface for HM and EM to emulate the RDMSR instruction.
11637 *
11638 * @returns Strict VBox status code.
11639 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11640 *
11641 * @param pVCpu The cross context virtual CPU structure.
11642 * @param cbInstr The instruction length in bytes.
11643 *
11644 * @remarks Not all of the state needs to be synced in. Requires RCX and
11645 * (currently) all MSRs.
11646 */
11647VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11648{
11649 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11650 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11651
11652 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11653 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11654 Assert(!pVCpu->iem.s.cActiveMappings);
11655 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11656}
11657
11658
11659/**
11660 * Interface for HM and EM to emulate the WRMSR instruction.
11661 *
11662 * @returns Strict VBox status code.
11663 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11664 *
11665 * @param pVCpu The cross context virtual CPU structure.
11666 * @param cbInstr The instruction length in bytes.
11667 *
11668 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11669 * and (currently) all MSRs.
11670 */
11671VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11672{
11673 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11674 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11675 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11676
11677 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11678 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11679 Assert(!pVCpu->iem.s.cActiveMappings);
11680 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11681}
11682
11683
11684/**
11685 * Interface for HM and EM to emulate the MONITOR instruction.
11686 *
11687 * @returns Strict VBox status code.
11688 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11689 *
11690 * @param pVCpu The cross context virtual CPU structure.
11691 * @param cbInstr The instruction length in bytes.
11692 *
11693 * @remarks Not all of the state needs to be synced in.
11694 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11695 * are used.
11696 */
11697VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11698{
11699 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11700 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11701
11702 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11703 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11704 Assert(!pVCpu->iem.s.cActiveMappings);
11705 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11706}
11707
11708
11709/**
11710 * Interface for HM and EM to emulate the MWAIT instruction.
11711 *
11712 * @returns Strict VBox status code.
11713 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11714 *
11715 * @param pVCpu The cross context virtual CPU structure.
11716 * @param cbInstr The instruction length in bytes.
11717 *
11718 * @remarks Not all of the state needs to be synced in.
11719 */
11720VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11721{
11722 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11723 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11724
11725 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11726 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11727 Assert(!pVCpu->iem.s.cActiveMappings);
11728 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11729}
11730
11731
11732/**
11733 * Interface for HM and EM to emulate the HLT instruction.
11734 *
11735 * @returns Strict VBox status code.
11736 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11737 *
11738 * @param pVCpu The cross context virtual CPU structure.
11739 * @param cbInstr The instruction length in bytes.
11740 *
11741 * @remarks Not all of the state needs to be synced in.
11742 */
11743VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11744{
11745 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11746
11747 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11748 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11749 Assert(!pVCpu->iem.s.cActiveMappings);
11750 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11751}
11752
11753
11754/**
11755 * Checks if IEM is in the process of delivering an event (interrupt or
11756 * exception).
11757 *
11758 * @returns true if we're in the process of raising an interrupt or exception,
11759 * false otherwise.
11760 * @param pVCpu The cross context virtual CPU structure.
11761 * @param puVector Where to store the vector associated with the
11762 * currently delivered event, optional.
11763 * @param pfFlags Where to store the event delivery flags (see
11764 * IEM_XCPT_FLAGS_XXX), optional.
11765 * @param puErr Where to store the error code associated with the
11766 * event, optional.
11767 * @param puCr2 Where to store the CR2 associated with the event,
11768 * optional.
11769 * @remarks The caller should check the flags to determine if the error code and
11770 * CR2 are valid for the event.
11771 */
11772VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11773{
11774 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11775 if (fRaisingXcpt)
11776 {
11777 if (puVector)
11778 *puVector = pVCpu->iem.s.uCurXcpt;
11779 if (pfFlags)
11780 *pfFlags = pVCpu->iem.s.fCurXcpt;
11781 if (puErr)
11782 *puErr = pVCpu->iem.s.uCurXcptErr;
11783 if (puCr2)
11784 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11785 }
11786 return fRaisingXcpt;
11787}
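
/*
 * Illustrative sketch, not part of the build: querying the event IEM is
 * currently delivering and checking which outputs are meaningful.  The flag
 * tests mirror the IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2 handling in
 * IEMInjectTrap above.
 */
#if 0
static void vmmExampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t uVector; uint32_t fFlags; uint32_t uErr; uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x fFlags=%#x err=%#x (%s) cr2=%RX64 (%s)\n", uVector, fFlags,
             uErr, fFlags & IEM_XCPT_FLAGS_ERR ? "valid" : "n/a",
             uCr2, fFlags & IEM_XCPT_FLAGS_CR2 ? "valid" : "n/a"));
}
#endif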
11788
11789#ifdef IN_RING3
11790
11791/**
11792 * Handles the unlikely and probably fatal merge cases.
11793 *
11794 * @returns Merged status code.
11795 * @param rcStrict Current EM status code.
11796 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11797 * with @a rcStrict.
11798 * @param iMemMap The memory mapping index. For error reporting only.
11799 * @param pVCpu The cross context virtual CPU structure of the calling
11800 * thread, for error reporting only.
11801 */
11802DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11803 unsigned iMemMap, PVMCPUCC pVCpu)
11804{
11805 if (RT_FAILURE_NP(rcStrict))
11806 return rcStrict;
11807
11808 if (RT_FAILURE_NP(rcStrictCommit))
11809 return rcStrictCommit;
11810
11811 if (rcStrict == rcStrictCommit)
11812 return rcStrictCommit;
11813
11814 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11815 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11816 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11817 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11818 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11819 return VERR_IOM_FF_STATUS_IPE;
11820}
11821
11822
11823/**
11824 * Helper for IOMR3ProcessForceFlag.
11825 *
11826 * @returns Merged status code.
11827 * @param rcStrict Current EM status code.
11828 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11829 * with @a rcStrict.
11830 * @param iMemMap The memory mapping index. For error reporting only.
11831 * @param pVCpu The cross context virtual CPU structure of the calling
11832 * thread, for error reporting only.
11833 */
11834DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11835{
11836 /* Simple. */
11837 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11838 return rcStrictCommit;
11839
11840 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11841 return rcStrict;
11842
11843 /* EM scheduling status codes. */
11844 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11845 && rcStrict <= VINF_EM_LAST))
11846 {
11847 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11848 && rcStrictCommit <= VINF_EM_LAST))
11849 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11850 }
11851
11852 /* Unlikely */
11853 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11854}
11855
11856
11857/**
11858 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11859 *
11860 * @returns Merge between @a rcStrict and what the commit operation returned.
11861 * @param pVM The cross context VM structure.
11862 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11863 * @param rcStrict The status code returned by ring-0 or raw-mode.
11864 */
11865VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11866{
11867 /*
11868 * Reset the pending commit.
11869 */
11870 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11871 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11872 ("%#x %#x %#x\n",
11873 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11874 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11875
11876 /*
11877 * Commit the pending bounce buffers (usually just one).
11878 */
11879 unsigned cBufs = 0;
11880 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11881 while (iMemMap-- > 0)
11882 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11883 {
11884 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11885 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11886 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11887
11888 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11889 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11890 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11891
11892 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11893 {
11894 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11895 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11896 pbBuf,
11897 cbFirst,
11898 PGMACCESSORIGIN_IEM);
11899 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11900 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11901 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11902 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11903 }
11904
11905 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11906 {
11907 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11908 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11909 pbBuf + cbFirst,
11910 cbSecond,
11911 PGMACCESSORIGIN_IEM);
11912 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11913 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11914 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11915 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11916 }
11917 cBufs++;
11918 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11919 }
11920
11921 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11922 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11923 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11924 pVCpu->iem.s.cActiveMappings = 0;
11925 return rcStrict;
11926}
11927
11928#endif /* IN_RING3 */
11929