VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@99863

Last change on this file since 99863 was 99739, checked in by vboxsync, 19 months ago

*: doxygen corrections (mostly about removing @returns from functions returning void).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 483.4 KB
1/* $Id: IEMAll.cpp 99739 2023-05-11 01:01:08Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 */
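
/* Illustrative only, not actual call sites: a sketch of how the level
 * assignments above are typically exercised (the variable names here are
 * made up for the example):
 *      Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));              - level 1: exceptions
 *      Log4(("decode: %04x:%RGv %s\n", uSel, GCPtrPC, pszMnemonic)); - level 4: mnemonics w/ EIP
 *      Log8(("memwrite: %RGv LB %#x\n", GCPtrMem, cbMem));           - level 8: memory writes
 */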
91
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <iprt/asm-math.h>
130#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
131# include <iprt/asm-amd64-x86.h>
132#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
133# include <iprt/asm-arm.h>
134#endif
135#include <iprt/assert.h>
136#include <iprt/string.h>
137#include <iprt/x86.h>
138
139#include "IEMInline.h"
140
141
142/*********************************************************************************************************************************
143* Structures and Typedefs *
144*********************************************************************************************************************************/
145/**
146 * CPU exception classes.
147 */
148typedef enum IEMXCPTCLASS
149{
150 IEMXCPTCLASS_BENIGN,
151 IEMXCPTCLASS_CONTRIBUTORY,
152 IEMXCPTCLASS_PAGE_FAULT,
153 IEMXCPTCLASS_DOUBLE_FAULT
154} IEMXCPTCLASS;
155
156
157/*********************************************************************************************************************************
158* Global Variables *
159*********************************************************************************************************************************/
160#if defined(IEM_LOG_MEMORY_WRITES)
161/** What IEM just wrote. */
162uint8_t g_abIemWrote[256];
163/** How much IEM just wrote. */
164size_t g_cbIemWrote;
165#endif
166
167
168/*********************************************************************************************************************************
169* Internal Functions *
170*********************************************************************************************************************************/
171static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
172 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
173
174
175/**
176 * Slow path of iemInitDecoder() and iemInitExec() that checks what kind of
177 * breakpoints are enabled.
178 *
179 * @param pVCpu The cross context virtual CPU structure of the
180 * calling thread.
181 */
182void iemInitPendingBreakpointsSlow(PVMCPUCC pVCpu)
183{
184 /*
185 * Process guest breakpoints.
186 */
187#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
188 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
189 { \
190 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
191 { \
192 case X86_DR7_RW_EO: \
193 pVCpu->iem.s.fPendingInstructionBreakpoints = true; \
194 break; \
195 case X86_DR7_RW_WO: \
196 case X86_DR7_RW_RW: \
197 pVCpu->iem.s.fPendingDataBreakpoints = true; \
198 break; \
199 case X86_DR7_RW_IO: \
200 pVCpu->iem.s.fPendingIoBreakpoints = true; \
201 break; \
202 } \
203 } \
204 } while (0)
205 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
206 if (fGstDr7 & X86_DR7_ENABLED_MASK)
207 {
208 PROCESS_ONE_BP(fGstDr7, 0);
209 PROCESS_ONE_BP(fGstDr7, 1);
210 PROCESS_ONE_BP(fGstDr7, 2);
211 PROCESS_ONE_BP(fGstDr7, 3);
212 }
213
214 /*
215 * Process hypervisor breakpoints.
216 */
217 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
218 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
219 {
220 PROCESS_ONE_BP(fHyperDr7, 0);
221 PROCESS_ONE_BP(fHyperDr7, 1);
222 PROCESS_ONE_BP(fHyperDr7, 2);
223 PROCESS_ONE_BP(fHyperDr7, 3);
224 }
225}
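
/* Worked example (illustrative DR7 value): 0x00100005 has L0 and L1 set with
 * R/W0 = 00 (execute only) and R/W1 = 01 (write only), so the macro above sets
 * fPendingInstructionBreakpoints for BP0 and fPendingDataBreakpoints for BP1,
 * while fPendingIoBreakpoints stays false. */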
226
227
228/**
229 * Initializes the decoder state.
230 *
231 * iemReInitDecoder is mostly a copy of this function.
232 *
233 * @param pVCpu The cross context virtual CPU structure of the
234 * calling thread.
235 * @param fBypassHandlers Whether to bypass access handlers.
236 * @param fDisregardLock Whether to disregard the LOCK prefix.
237 */
238DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
239{
240 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
241 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
242 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
243 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
244 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
245 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
246 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
247 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
248 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
249 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
250
251 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
252 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
253 pVCpu->iem.s.enmCpuMode = enmMode;
254 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
255 pVCpu->iem.s.enmEffAddrMode = enmMode;
256 if (enmMode != IEMMODE_64BIT)
257 {
258 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
259 pVCpu->iem.s.enmEffOpSize = enmMode;
260 }
261 else
262 {
263 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
264 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
265 }
266 pVCpu->iem.s.fPrefixes = 0;
267 pVCpu->iem.s.uRexReg = 0;
268 pVCpu->iem.s.uRexB = 0;
269 pVCpu->iem.s.uRexIndex = 0;
270 pVCpu->iem.s.idxPrefix = 0;
271 pVCpu->iem.s.uVex3rdReg = 0;
272 pVCpu->iem.s.uVexLength = 0;
273 pVCpu->iem.s.fEvexStuff = 0;
274 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
275#ifdef IEM_WITH_CODE_TLB
276 pVCpu->iem.s.pbInstrBuf = NULL;
277 pVCpu->iem.s.offInstrNextByte = 0;
278 pVCpu->iem.s.offCurInstrStart = 0;
279# ifdef VBOX_STRICT
280 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
281 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
282 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
283# endif
284#else
285 pVCpu->iem.s.offOpcode = 0;
286 pVCpu->iem.s.cbOpcode = 0;
287#endif
288 pVCpu->iem.s.offModRm = 0;
289 pVCpu->iem.s.cActiveMappings = 0;
290 pVCpu->iem.s.iNextMapping = 0;
291 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
292 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
293 pVCpu->iem.s.fDisregardLock = fDisregardLock;
294 pVCpu->iem.s.fPendingInstructionBreakpoints = false;
295 pVCpu->iem.s.fPendingDataBreakpoints = false;
296 pVCpu->iem.s.fPendingIoBreakpoints = false;
297 if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
298 && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
299 { /* likely */ }
300 else
301 iemInitPendingBreakpointsSlow(pVCpu);
302
303#ifdef DBGFTRACE_ENABLED
304 switch (enmMode)
305 {
306 case IEMMODE_64BIT:
307 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
308 break;
309 case IEMMODE_32BIT:
310 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
311 break;
312 case IEMMODE_16BIT:
313 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
314 break;
315 }
316#endif
317}
318
319
320/**
321 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
322 *
323 * This is mostly a copy of iemInitDecoder.
324 *
325 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
326 */
327DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
328{
329 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
330 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
331 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
332 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
333 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
334 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
335 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
336 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
337 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
338
339 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
340 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
341 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
342 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
343 pVCpu->iem.s.enmEffAddrMode = enmMode;
344 if (enmMode != IEMMODE_64BIT)
345 {
346 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
347 pVCpu->iem.s.enmEffOpSize = enmMode;
348 }
349 else
350 {
351 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
352 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
353 }
354 pVCpu->iem.s.fPrefixes = 0;
355 pVCpu->iem.s.uRexReg = 0;
356 pVCpu->iem.s.uRexB = 0;
357 pVCpu->iem.s.uRexIndex = 0;
358 pVCpu->iem.s.idxPrefix = 0;
359 pVCpu->iem.s.uVex3rdReg = 0;
360 pVCpu->iem.s.uVexLength = 0;
361 pVCpu->iem.s.fEvexStuff = 0;
362 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
363#ifdef IEM_WITH_CODE_TLB
364 if (pVCpu->iem.s.pbInstrBuf)
365 {
366 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
367 ? pVCpu->cpum.GstCtx.rip
368 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
369 - pVCpu->iem.s.uInstrBufPc;
370 if (off < pVCpu->iem.s.cbInstrBufTotal)
371 {
372 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
373 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
374 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
375 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
376 else
377 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
378 }
379 else
380 {
381 pVCpu->iem.s.pbInstrBuf = NULL;
382 pVCpu->iem.s.offInstrNextByte = 0;
383 pVCpu->iem.s.offCurInstrStart = 0;
384 pVCpu->iem.s.cbInstrBuf = 0;
385 pVCpu->iem.s.cbInstrBufTotal = 0;
386 }
387 }
388 else
389 {
390 pVCpu->iem.s.offInstrNextByte = 0;
391 pVCpu->iem.s.offCurInstrStart = 0;
392 pVCpu->iem.s.cbInstrBuf = 0;
393 pVCpu->iem.s.cbInstrBufTotal = 0;
394 }
395#else
396 pVCpu->iem.s.cbOpcode = 0;
397 pVCpu->iem.s.offOpcode = 0;
398#endif
399 pVCpu->iem.s.offModRm = 0;
400 Assert(pVCpu->iem.s.cActiveMappings == 0);
401 pVCpu->iem.s.iNextMapping = 0;
402 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
403 Assert(pVCpu->iem.s.fBypassHandlers == false);
404
405#ifdef DBGFTRACE_ENABLED
406 switch (enmMode)
407 {
408 case IEMMODE_64BIT:
409 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
410 break;
411 case IEMMODE_32BIT:
412 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
413 break;
414 case IEMMODE_16BIT:
415 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
416 break;
417 }
418#endif
419}
420
421
422
423/**
424 * Prefetches opcodes the first time execution is started.
425 *
426 * @returns Strict VBox status code.
427 * @param pVCpu The cross context virtual CPU structure of the
428 * calling thread.
429 * @param fBypassHandlers Whether to bypass access handlers.
430 * @param fDisregardLock Whether to disregard LOCK prefixes.
431 *
432 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
433 * store them as such.
434 */
435static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
436{
437 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
438
439#ifndef IEM_WITH_CODE_TLB
440 /*
441 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
442 *
443 * First translate CS:rIP to a physical address.
444 *
445 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
446 * all relevant bytes from the first page, as it ASSUMES it's only ever
447 * called for dealing with CS.LIM, page crossing and instructions that
448 * are too long.
449 */
450 uint32_t cbToTryRead;
451 RTGCPTR GCPtrPC;
452 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
453 {
454 cbToTryRead = GUEST_PAGE_SIZE;
455 GCPtrPC = pVCpu->cpum.GstCtx.rip;
456 if (IEM_IS_CANONICAL(GCPtrPC))
457 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
458 else
459 return iemRaiseGeneralProtectionFault0(pVCpu);
460 }
461 else
462 {
463 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
464 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
465 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
466 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
467 else
468 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
469 if (cbToTryRead) { /* likely */ }
470 else /* overflowed */
471 {
472 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
473 cbToTryRead = UINT32_MAX;
474 }
475 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
476 Assert(GCPtrPC <= UINT32_MAX);
477 }
478
479 PGMPTWALK Walk;
480 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
481 if (RT_SUCCESS(rc))
482 Assert(Walk.fSucceeded); /* probable. */
483 else
484 {
485 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
486# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
487 if (Walk.fFailed & PGM_WALKFAIL_EPT)
488 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
489# endif
490 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
491 }
492 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
493 else
494 {
495 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
496# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
497 if (Walk.fFailed & PGM_WALKFAIL_EPT)
498 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
499# endif
500 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
501 }
502 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
503 else
504 {
505 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
506# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
507 if (Walk.fFailed & PGM_WALKFAIL_EPT)
508 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
509# endif
510 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
511 }
512 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
513 /** @todo Check reserved bits and such stuff. PGM is better at doing
514 * that, so do it when implementing the guest virtual address
515 * TLB... */
516
517 /*
518 * Read the bytes at this address.
519 */
520 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
521 if (cbToTryRead > cbLeftOnPage)
522 cbToTryRead = cbLeftOnPage;
523 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
524 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
525
526 if (!pVCpu->iem.s.fBypassHandlers)
527 {
528 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
529 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
530 { /* likely */ }
531 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
532 {
533 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
534 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
535 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
536 }
537 else
538 {
539 Log((RT_SUCCESS(rcStrict)
540 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
541 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
542 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
543 return rcStrict;
544 }
545 }
546 else
547 {
548 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
549 if (RT_SUCCESS(rc))
550 { /* likely */ }
551 else
552 {
553 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
554 GCPtrPC, GCPhys, cbToTryRead, rc));
555 return rc;
556 }
557 }
558 pVCpu->iem.s.cbOpcode = cbToTryRead;
559#endif /* !IEM_WITH_CODE_TLB */
560 return VINF_SUCCESS;
561}
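
/* Worked example (illustrative, !IEM_WITH_CODE_TLB path): with a 64-bit rIP at
 * page offset 0xff5, the code above limits cbToTryRead to the 0xb bytes left
 * on the guest page and then caps it at sizeof(abOpcode); an instruction that
 * runs past the page boundary is completed later by iemOpcodeFetchMoreBytes. */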
562
563
564/**
565 * Invalidates the IEM TLBs.
566 *
567 * This is called internally as well as by PGM when moving GC mappings.
568 *
569 * @param pVCpu The cross context virtual CPU structure of the calling
570 * thread.
571 */
572VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
573{
574#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
575 Log10(("IEMTlbInvalidateAll\n"));
576# ifdef IEM_WITH_CODE_TLB
577 pVCpu->iem.s.cbInstrBufTotal = 0;
578 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
579 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
580 { /* very likely */ }
581 else
582 {
583 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
584 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
585 while (i-- > 0)
586 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
587 }
588# endif
589
590# ifdef IEM_WITH_DATA_TLB
591 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
592 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
593 { /* very likely */ }
594 else
595 {
596 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
597 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
598 while (i-- > 0)
599 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
600 }
601# endif
602#else
603 RT_NOREF(pVCpu);
604#endif
605}
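
#if 0 /* Illustrative sketch only, not part of the build: the lazy invalidation
       * scheme used above, reduced to plain C with made-up names and sizes.
       * A lookup only hits when an entry's tag carries the current revision,
       * so bumping the revision invalidates everything in O(1); only on the
       * rare rollover back to zero must the whole array be scrubbed. */
typedef struct DEMOTLB
{
    uint64_t uRevision;                         /* lives in the bits above the page-number part of the tag */
    struct { uint64_t uTag; } aEntries[256];
} DEMOTLB;

static void demoTlbInvalidateAll(DEMOTLB *pTlb, uint64_t uRevisionIncr)
{
    pTlb->uRevision += uRevisionIncr;
    if (pTlb->uRevision != 0)
    { /* very likely */ }
    else
    {
        /* Rolled over: restart the revision and wipe stale tags so old entries
           cannot alias the fresh revision value. */
        pTlb->uRevision = uRevisionIncr;
        for (unsigned i = 0; i < RT_ELEMENTS(pTlb->aEntries); i++)
            pTlb->aEntries[i].uTag = 0;
    }
}
#endif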
606
607
608/**
609 * Invalidates a page in the TLBs.
610 *
611 * @param pVCpu The cross context virtual CPU structure of the calling
612 * thread.
613 * @param GCPtr The address of the page to invalidate
614 * @thread EMT(pVCpu)
615 */
616VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
617{
618#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
619 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
620 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
621 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
622 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
623
624# ifdef IEM_WITH_CODE_TLB
625 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
626 {
627 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
628 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
629 pVCpu->iem.s.cbInstrBufTotal = 0;
630 }
631# endif
632
633# ifdef IEM_WITH_DATA_TLB
634 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
635 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
636# endif
637#else
638 NOREF(pVCpu); NOREF(GCPtr);
639#endif
640}
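
#if 0 /* Illustrative sketch only (made-up 4 KiB page size and power-of-two
       * table size): how a linear address is reduced to the tag/index pair the
       * lookups above compare against; the real macros are
       * IEMTLB_CALC_TAG_NO_REV and IEMTLB_TAG_TO_INDEX in IEMInternal.h. */
static bool demoTlbIsHit(uint64_t const *pauTags, unsigned cEntries, uint64_t uRevision, uint64_t GCPtr)
{
    uint64_t const uTagNoRev = GCPtr >> 12;                          /* drop the page offset */
    unsigned const idx       = (unsigned)uTagNoRev & (cEntries - 1); /* direct mapped */
    return pauTags[idx] == (uTagNoRev | uRevision);                  /* tag + current revision must match */
}
#endif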
641
642
643#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
644/**
645 * Invalidates both TLBs in slow fashion following a rollover.
646 *
647 * Worker for IEMTlbInvalidateAllPhysical,
648 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
649 * iemMemMapJmp and others.
650 *
651 * @thread EMT(pVCpu)
652 */
653static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
654{
655 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
656 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
657 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
658
659 unsigned i;
660# ifdef IEM_WITH_CODE_TLB
661 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
662 while (i-- > 0)
663 {
664 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
665 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
666 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
667 }
668# endif
669# ifdef IEM_WITH_DATA_TLB
670 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
671 while (i-- > 0)
672 {
673 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
674 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
675 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
676 }
677# endif
678
679}
680#endif
681
682
683/**
684 * Invalidates the host physical aspects of the IEM TLBs.
685 *
686 * This is called internally as well as by PGM when moving GC mappings.
687 *
688 * @param pVCpu The cross context virtual CPU structure of the calling
689 * thread.
690 * @note Currently not used.
691 */
692VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
693{
694#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
695 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
696 Log10(("IEMTlbInvalidateAllPhysical\n"));
697
698# ifdef IEM_WITH_CODE_TLB
699 pVCpu->iem.s.cbInstrBufTotal = 0;
700# endif
701 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
702 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
703 {
704 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
705 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
706 }
707 else
708 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
709#else
710 NOREF(pVCpu);
711#endif
712}
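
/* Note (based on the code above and in iemOpcodeFetchBytesJmp): the physical
 * revision uses the same lazy trick as the linear one; the current uTlbPhysRev
 * is stored in the IEMTLBE_F_PHYS_REV bits of an entry's fFlagsAndPhysRev when
 * the physical page info is resolved, so bumping the revision makes that
 * comparison fail and forces a re-resolve on the entry's next use. */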
713
714
715/**
716 * Invalidates the host physical aspects of the IEM TLBs.
717 *
718 * This is called internally as well as by PGM when moving GC mappings.
719 *
720 * @param pVM The cross context VM structure.
721 * @param idCpuCaller The ID of the calling EMT if available to the caller,
722 * otherwise NIL_VMCPUID.
723 *
724 * @remarks Caller holds the PGM lock.
725 */
726VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
727{
728#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
729 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
730 if (pVCpuCaller)
731 VMCPU_ASSERT_EMT(pVCpuCaller);
732 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
733
734 VMCC_FOR_EACH_VMCPU(pVM)
735 {
736# ifdef IEM_WITH_CODE_TLB
737 if (pVCpuCaller == pVCpu)
738 pVCpu->iem.s.cbInstrBufTotal = 0;
739# endif
740
741 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
742 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
743 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
744 { /* likely */}
745 else if (pVCpuCaller == pVCpu)
746 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
747 else
748 {
749 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
750 continue;
751 }
752 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
753 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
754 }
755 VMCC_FOR_EACH_VMCPU_END(pVM);
756
757#else
758 RT_NOREF(pVM, idCpuCaller);
759#endif
760}
761
762#ifdef IEM_WITH_CODE_TLB
763
764/**
765 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
766 * failure and jumping.
767 *
768 * We end up here for a number of reasons:
769 * - pbInstrBuf isn't yet initialized.
770 * - Advancing beyond the buffer boundary (e.g. cross page).
771 * - Advancing beyond the CS segment limit.
772 * - Fetching from non-mappable page (e.g. MMIO).
773 *
774 * @param pVCpu The cross context virtual CPU structure of the
775 * calling thread.
776 * @param pvDst Where to return the bytes.
777 * @param cbDst Number of bytes to read.
778 *
779 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
780 */
781void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
782{
783# ifdef IN_RING3
784 for (;;)
785 {
786 Assert(cbDst <= 8);
787 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
788
789 /*
790 * We might have a partial buffer match, deal with that first to make the
791 * rest simpler. This is the first part of the cross page/buffer case.
792 */
793 if (pVCpu->iem.s.pbInstrBuf != NULL)
794 {
795 if (offBuf < pVCpu->iem.s.cbInstrBuf)
796 {
797 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
798 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
799 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
800
801 cbDst -= cbCopy;
802 pvDst = (uint8_t *)pvDst + cbCopy;
803 offBuf += cbCopy;
804 pVCpu->iem.s.offInstrNextByte += offBuf;
805 }
806 }
807
808 /*
809 * Check segment limit, figuring how much we're allowed to access at this point.
810 *
811 * We will fault immediately if RIP is past the segment limit / in non-canonical
812 * territory. If we do continue, there are one or more bytes to read before we
813 * end up in trouble and we need to do that first before faulting.
814 */
815 RTGCPTR GCPtrFirst;
816 uint32_t cbMaxRead;
817 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
818 {
819 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
820 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
821 { /* likely */ }
822 else
823 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
824 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
825 }
826 else
827 {
828 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
829 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); - this is allowed */
830 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
831 { /* likely */ }
832 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
833 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
834 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
835 if (cbMaxRead != 0)
836 { /* likely */ }
837 else
838 {
839 /* Overflowed because address is 0 and limit is max. */
840 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
841 cbMaxRead = X86_PAGE_SIZE;
842 }
843 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
844 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
845 if (cbMaxRead2 < cbMaxRead)
846 cbMaxRead = cbMaxRead2;
847 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
848 }
849
850 /*
851 * Get the TLB entry for this piece of code.
852 */
853 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
854 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
855 if (pTlbe->uTag == uTag)
856 {
857 /* likely when executing lots of code, otherwise unlikely */
858# ifdef VBOX_WITH_STATISTICS
859 pVCpu->iem.s.CodeTlb.cTlbHits++;
860# endif
861 }
862 else
863 {
864 pVCpu->iem.s.CodeTlb.cTlbMisses++;
865 PGMPTWALK Walk;
866 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
867 if (RT_FAILURE(rc))
868 {
869#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
870 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
871 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
872#endif
873 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
874 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
875 }
876
877 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
878 Assert(Walk.fSucceeded);
879 pTlbe->uTag = uTag;
880 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
881 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
882 pTlbe->GCPhys = Walk.GCPhys;
883 pTlbe->pbMappingR3 = NULL;
884 }
885
886 /*
887 * Check TLB page table level access flags.
888 */
889 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
890 {
891 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
892 {
893 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
894 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
895 }
896 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
897 {
898 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
899 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
900 }
901 }
902
903 /*
904 * Look up the physical page info if necessary.
905 */
906 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
907 { /* not necessary */ }
908 else
909 {
910 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
911 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
912 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
913 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
914 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
915 { /* likely */ }
916 else
917 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
918 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
919 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
920 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
921 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
922 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
923 }
924
925# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
926 /*
927 * Try to do a direct read using the pbMappingR3 pointer.
928 */
929 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
930 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
931 {
932 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
933 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
934 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
935 {
936 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
937 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
938 }
939 else
940 {
941 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
942 if (cbInstr + (uint32_t)cbDst <= 15)
943 {
944 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
945 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
946 }
947 else
948 {
949 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
950 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
951 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
952 }
953 }
954 if (cbDst <= cbMaxRead)
955 {
956 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
957 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
958 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
959 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
960 return;
961 }
962 pVCpu->iem.s.pbInstrBuf = NULL;
963
964 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
965 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
966 }
967# else
968# error "refactor as needed"
969 /*
970 * If there is no special read handling, we can read a bit more and
971 * put it in the prefetch buffer.
972 */
973 if ( cbDst < cbMaxRead
974 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
975 {
976 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
977 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
978 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
979 { /* likely */ }
980 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
981 {
982 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
983 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
984 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
985 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
986 }
987 else
988 {
989 Log((RT_SUCCESS(rcStrict)
990 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
991 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
992 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
993 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
994 }
995 }
996# endif
997 /*
998 * Special read handling, so only read exactly what's needed.
999 * This is a highly unlikely scenario.
1000 */
1001 else
1002 {
1003 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1004
1005 /* Check instruction length. */
1006 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1007 if (RT_LIKELY(cbInstr + cbDst <= 15))
1008 { /* likely */ }
1009 else
1010 {
1011 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1012 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1013 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1014 }
1015
1016 /* Do the reading. */
1017 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1018 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1019 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1020 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1021 { /* likely */ }
1022 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1023 {
1024 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1025 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1026 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1027 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1028 }
1029 else
1030 {
1031 Log((RT_SUCCESS(rcStrict)
1032 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1033 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1034 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1035 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1036 }
1037 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1038 if (cbToRead == cbDst)
1039 return;
1040 }
1041
1042 /*
1043 * More to read, loop.
1044 */
1045 cbDst -= cbMaxRead;
1046 pvDst = (uint8_t *)pvDst + cbMaxRead;
1047 }
1048# else /* !IN_RING3 */
1049 RT_NOREF(pvDst, cbDst);
1050 if (pvDst || cbDst)
1051 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1052# endif /* !IN_RING3 */
1053}
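
/* Worked example (illustrative): an instruction needing 7 opcode bytes with
 * only 3 left in the current pbInstrBuf page: the partial-match block at the
 * top of the loop copies those 3 bytes, after which the same iteration
 * resolves the TLB entry for the following page and reads the remaining 4
 * bytes, raising #PF instead if that page is not present, not executable
 * (with NXE), or supervisor-only while at CPL 3. */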
1054
1055#else
1056
1057/**
1058 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1059 * exception if it fails.
1060 *
1061 * @returns Strict VBox status code.
1062 * @param pVCpu The cross context virtual CPU structure of the
1063 * calling thread.
1064 * @param cbMin The minimum number of bytes relative to offOpcode
1065 * that must be read.
1066 */
1067VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1068{
1069 /*
1070 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1071 *
1072 * First translate CS:rIP to a physical address.
1073 */
1074 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1075 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1076 uint8_t const cbLeft = cbOpcode - offOpcode;
1077 Assert(cbLeft < cbMin);
1078 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1079
1080 uint32_t cbToTryRead;
1081 RTGCPTR GCPtrNext;
1082 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1083 {
1084 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1085 if (!IEM_IS_CANONICAL(GCPtrNext))
1086 return iemRaiseGeneralProtectionFault0(pVCpu);
1087 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1088 }
1089 else
1090 {
1091 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1092 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); - this is allowed */
1093 GCPtrNext32 += cbOpcode;
1094 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1095 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1096 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1097 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1098 if (!cbToTryRead) /* overflowed */
1099 {
1100 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1101 cbToTryRead = UINT32_MAX;
1102 /** @todo check out wrapping around the code segment. */
1103 }
1104 if (cbToTryRead < cbMin - cbLeft)
1105 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1106 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1107
1108 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1109 if (cbToTryRead > cbLeftOnPage)
1110 cbToTryRead = cbLeftOnPage;
1111 }
1112
1113 /* Restrict to opcode buffer space.
1114
1115 We're making ASSUMPTIONS here based on work done previously in
1116 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1117 be fetched in case of an instruction crossing two pages. */
1118 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1119 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1120 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1121 { /* likely */ }
1122 else
1123 {
1124 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1125 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1126 return iemRaiseGeneralProtectionFault0(pVCpu);
1127 }
1128
1129 PGMPTWALK Walk;
1130 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1131 if (RT_FAILURE(rc))
1132 {
1133 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1134#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1135 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1136 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1137#endif
1138 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1139 }
1140 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1141 {
1142 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1143#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1144 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1145 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1146#endif
1147 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1148 }
1149 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1150 {
1151 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1152#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1153 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1154 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1155#endif
1156 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1157 }
1158 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1159 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1160 /** @todo Check reserved bits and such stuff. PGM is better at doing
1161 * that, so do it when implementing the guest virtual address
1162 * TLB... */
1163
1164 /*
1165 * Read the bytes at this address.
1166 *
1167 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1168 * and since PATM should only patch the start of an instruction there
1169 * should be no need to check again here.
1170 */
1171 if (!pVCpu->iem.s.fBypassHandlers)
1172 {
1173 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1174 cbToTryRead, PGMACCESSORIGIN_IEM);
1175 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1176 { /* likely */ }
1177 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1178 {
1179 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1180 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1181 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1182 }
1183 else
1184 {
1185 Log((RT_SUCCESS(rcStrict)
1186 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1187 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1188 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1189 return rcStrict;
1190 }
1191 }
1192 else
1193 {
1194 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1195 if (RT_SUCCESS(rc))
1196 { /* likely */ }
1197 else
1198 {
1199 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1200 return rc;
1201 }
1202 }
1203 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1204 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1205
1206 return VINF_SUCCESS;
1207}
1208
1209#endif /* !IEM_WITH_CODE_TLB */
1210#ifndef IEM_WITH_SETJMP
1211
1212/**
1213 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1214 *
1215 * @returns Strict VBox status code.
1216 * @param pVCpu The cross context virtual CPU structure of the
1217 * calling thread.
1218 * @param pb Where to return the opcode byte.
1219 */
1220VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1221{
1222 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1223 if (rcStrict == VINF_SUCCESS)
1224 {
1225 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1226 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1227 pVCpu->iem.s.offOpcode = offOpcode + 1;
1228 }
1229 else
1230 *pb = 0;
1231 return rcStrict;
1232}
1233
1234#else /* IEM_WITH_SETJMP */
1235
1236/**
1237 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1238 *
1239 * @returns The opcode byte.
1240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1241 */
1242uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1243{
1244# ifdef IEM_WITH_CODE_TLB
1245 uint8_t u8;
1246 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1247 return u8;
1248# else
1249 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1250 if (rcStrict == VINF_SUCCESS)
1251 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1252 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1253# endif
1254}
1255
1256#endif /* IEM_WITH_SETJMP */
1257
1258#ifndef IEM_WITH_SETJMP
1259
1260/**
1261 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1262 *
1263 * @returns Strict VBox status code.
1264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1265 * @param pu16 Where to return the opcode byte, sign-extended to a word.
1266 */
1267VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1268{
1269 uint8_t u8;
1270 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1271 if (rcStrict == VINF_SUCCESS)
1272 *pu16 = (int8_t)u8;
1273 return rcStrict;
1274}
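
/* Example: an opcode byte of 0xfe is the signed value -2, so the (int8_t) cast
 * above sign-extends it and *pu16 ends up as 0xfffe; the S8SxU32 and S8SxU64
 * variants below widen the same way to 0xfffffffe and 0xfffffffffffffffe. */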
1275
1276
1277/**
1278 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1279 *
1280 * @returns Strict VBox status code.
1281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1282 * @param pu32 Where to return the opcode dword.
1283 */
1284VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1285{
1286 uint8_t u8;
1287 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1288 if (rcStrict == VINF_SUCCESS)
1289 *pu32 = (int8_t)u8;
1290 return rcStrict;
1291}
1292
1293
1294/**
1295 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1296 *
1297 * @returns Strict VBox status code.
1298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1299 * @param pu64 Where to return the opcode qword.
1300 */
1301VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1302{
1303 uint8_t u8;
1304 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1305 if (rcStrict == VINF_SUCCESS)
1306 *pu64 = (int8_t)u8;
1307 return rcStrict;
1308}
1309
1310#endif /* !IEM_WITH_SETJMP */
1311
1312
1313#ifndef IEM_WITH_SETJMP
1314
1315/**
1316 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1317 *
1318 * @returns Strict VBox status code.
1319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1320 * @param pu16 Where to return the opcode word.
1321 */
1322VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1323{
1324 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1325 if (rcStrict == VINF_SUCCESS)
1326 {
1327 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1328# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1329 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1330# else
1331 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1332# endif
1333 pVCpu->iem.s.offOpcode = offOpcode + 2;
1334 }
1335 else
1336 *pu16 = 0;
1337 return rcStrict;
1338}
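
/* Example: with abOpcode[offOpcode] = 0x34 and abOpcode[offOpcode + 1] = 0x12,
 * RT_MAKE_U16 above yields 0x1234, i.e. the immediate is assembled
 * little-endian, matching what the IEM_USE_UNALIGNED_DATA_ACCESS path reads
 * directly on little-endian hosts. */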
1339
1340#else /* IEM_WITH_SETJMP */
1341
1342/**
1343 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1344 *
1345 * @returns The opcode word.
1346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1347 */
1348uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1349{
1350# ifdef IEM_WITH_CODE_TLB
1351 uint16_t u16;
1352 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1353 return u16;
1354# else
1355 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1356 if (rcStrict == VINF_SUCCESS)
1357 {
1358 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1359 pVCpu->iem.s.offOpcode += 2;
1360# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1361 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1362# else
1363 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1364# endif
1365 }
1366 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1367# endif
1368}
1369
1370#endif /* IEM_WITH_SETJMP */
1371
1372#ifndef IEM_WITH_SETJMP
1373
1374/**
1375 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1376 *
1377 * @returns Strict VBox status code.
1378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1379 * @param pu32 Where to return the opcode double word.
1380 */
1381VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1382{
1383 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1384 if (rcStrict == VINF_SUCCESS)
1385 {
1386 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1387 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1388 pVCpu->iem.s.offOpcode = offOpcode + 2;
1389 }
1390 else
1391 *pu32 = 0;
1392 return rcStrict;
1393}
1394
1395
1396/**
1397 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1398 *
1399 * @returns Strict VBox status code.
1400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1401 * @param pu64 Where to return the opcode quad word.
1402 */
1403VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1404{
1405 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1406 if (rcStrict == VINF_SUCCESS)
1407 {
1408 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1409 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1410 pVCpu->iem.s.offOpcode = offOpcode + 2;
1411 }
1412 else
1413 *pu64 = 0;
1414 return rcStrict;
1415}
1416
1417#endif /* !IEM_WITH_SETJMP */
1418
1419#ifndef IEM_WITH_SETJMP
1420
1421/**
1422 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1423 *
1424 * @returns Strict VBox status code.
1425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1426 * @param pu32 Where to return the opcode dword.
1427 */
1428VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1429{
1430 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1431 if (rcStrict == VINF_SUCCESS)
1432 {
1433 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1434# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1435 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1436# else
1437 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1438 pVCpu->iem.s.abOpcode[offOpcode + 1],
1439 pVCpu->iem.s.abOpcode[offOpcode + 2],
1440 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1441# endif
1442 pVCpu->iem.s.offOpcode = offOpcode + 4;
1443 }
1444 else
1445 *pu32 = 0;
1446 return rcStrict;
1447}
1448
1449#else /* IEM_WITH_SETJMP */
1450
1451/**
1452 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1453 *
1454 * @returns The opcode dword.
1455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1456 */
1457uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1458{
1459# ifdef IEM_WITH_CODE_TLB
1460 uint32_t u32;
1461 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1462 return u32;
1463# else
1464 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1465 if (rcStrict == VINF_SUCCESS)
1466 {
1467 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1468 pVCpu->iem.s.offOpcode = offOpcode + 4;
1469# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1470 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1471# else
1472 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1473 pVCpu->iem.s.abOpcode[offOpcode + 1],
1474 pVCpu->iem.s.abOpcode[offOpcode + 2],
1475 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1476# endif
1477 }
1478 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1479# endif
1480}
1481
1482#endif /* IEM_WITH_SETJMP */
1483
1484#ifndef IEM_WITH_SETJMP
1485
1486/**
1487 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1488 *
1489 * @returns Strict VBox status code.
1490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1491 * @param pu64 Where to return the opcode dword, zero-extended to a qword.
1492 */
1493VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1494{
1495 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1496 if (rcStrict == VINF_SUCCESS)
1497 {
1498 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1499 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1500 pVCpu->iem.s.abOpcode[offOpcode + 1],
1501 pVCpu->iem.s.abOpcode[offOpcode + 2],
1502 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1503 pVCpu->iem.s.offOpcode = offOpcode + 4;
1504 }
1505 else
1506 *pu64 = 0;
1507 return rcStrict;
1508}
1509
1510
1511/**
1512 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1513 *
1514 * @returns Strict VBox status code.
1515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1516 * @param pu64 Where to return the opcode qword.
1517 */
1518VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1519{
1520 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1521 if (rcStrict == VINF_SUCCESS)
1522 {
1523 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1524 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1525 pVCpu->iem.s.abOpcode[offOpcode + 1],
1526 pVCpu->iem.s.abOpcode[offOpcode + 2],
1527 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1528 pVCpu->iem.s.offOpcode = offOpcode + 4;
1529 }
1530 else
1531 *pu64 = 0;
1532 return rcStrict;
1533}
1534
1535#endif /* !IEM_WITH_SETJMP */
1536
1537#ifndef IEM_WITH_SETJMP
1538
1539/**
1540 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1541 *
1542 * @returns Strict VBox status code.
1543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1544 * @param pu64 Where to return the opcode qword.
1545 */
1546VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1547{
1548 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1549 if (rcStrict == VINF_SUCCESS)
1550 {
1551 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1552# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1553 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1554# else
1555 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1556 pVCpu->iem.s.abOpcode[offOpcode + 1],
1557 pVCpu->iem.s.abOpcode[offOpcode + 2],
1558 pVCpu->iem.s.abOpcode[offOpcode + 3],
1559 pVCpu->iem.s.abOpcode[offOpcode + 4],
1560 pVCpu->iem.s.abOpcode[offOpcode + 5],
1561 pVCpu->iem.s.abOpcode[offOpcode + 6],
1562 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1563# endif
1564 pVCpu->iem.s.offOpcode = offOpcode + 8;
1565 }
1566 else
1567 *pu64 = 0;
1568 return rcStrict;
1569}
1570
1571#else /* IEM_WITH_SETJMP */
1572
1573/**
1574 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1575 *
1576 * @returns The opcode qword.
1577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1578 */
1579uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1580{
1581# ifdef IEM_WITH_CODE_TLB
1582 uint64_t u64;
1583 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1584 return u64;
1585# else
1586 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1587 if (rcStrict == VINF_SUCCESS)
1588 {
1589 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1590 pVCpu->iem.s.offOpcode = offOpcode + 8;
1591# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1592 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1593# else
1594 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1595 pVCpu->iem.s.abOpcode[offOpcode + 1],
1596 pVCpu->iem.s.abOpcode[offOpcode + 2],
1597 pVCpu->iem.s.abOpcode[offOpcode + 3],
1598 pVCpu->iem.s.abOpcode[offOpcode + 4],
1599 pVCpu->iem.s.abOpcode[offOpcode + 5],
1600 pVCpu->iem.s.abOpcode[offOpcode + 6],
1601 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1602# endif
1603 }
1604 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1605# endif
1606}
1607
1608#endif /* IEM_WITH_SETJMP */
1609
1610
1611
1612/** @name Misc Worker Functions.
1613 * @{
1614 */
1615
1616/**
1617 * Gets the exception class for the specified exception vector.
1618 *
1619 * @returns The class of the specified exception.
1620 * @param uVector The exception vector.
1621 */
1622static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1623{
1624 Assert(uVector <= X86_XCPT_LAST);
1625 switch (uVector)
1626 {
1627 case X86_XCPT_DE:
1628 case X86_XCPT_TS:
1629 case X86_XCPT_NP:
1630 case X86_XCPT_SS:
1631 case X86_XCPT_GP:
1632 case X86_XCPT_SX: /* AMD only */
1633 return IEMXCPTCLASS_CONTRIBUTORY;
1634
1635 case X86_XCPT_PF:
1636 case X86_XCPT_VE: /* Intel only */
1637 return IEMXCPTCLASS_PAGE_FAULT;
1638
1639 case X86_XCPT_DF:
1640 return IEMXCPTCLASS_DOUBLE_FAULT;
1641 }
1642 return IEMXCPTCLASS_BENIGN;
1643}
1644
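/* Worked example (illustrative): this classification drives the double/triple fault logic
   in IEMEvaluateRecursiveXcpt below, following the Intel SDM double fault conditions table:
       iemGetXcptClass(X86_XCPT_NP) -> IEMXCPTCLASS_CONTRIBUTORY
       iemGetXcptClass(X86_XCPT_GP) -> IEMXCPTCLASS_CONTRIBUTORY   (two contributory -> #DF)
       iemGetXcptClass(X86_XCPT_DB) -> IEMXCPTCLASS_BENIGN         (never escalates) */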
1645
1646/**
1647 * Evaluates how to handle an exception caused during delivery of another event
1648 * (exception / interrupt).
1649 *
1650 * @returns How to handle the recursive exception.
1651 * @param pVCpu The cross context virtual CPU structure of the
1652 * calling thread.
1653 * @param fPrevFlags The flags of the previous event.
1654 * @param uPrevVector The vector of the previous event.
1655 * @param fCurFlags The flags of the current exception.
1656 * @param uCurVector The vector of the current exception.
1657 * @param pfXcptRaiseInfo Where to store additional information about the
1658 * exception condition. Optional.
1659 */
1660VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1661 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1662{
1663 /*
1664 * Only CPU exceptions can be raised while delivering other events; software interrupt
1665 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1666 */
1667 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1668 Assert(pVCpu); RT_NOREF(pVCpu);
1669 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1670
1671 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1672 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1673 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1674 {
1675 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1676 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1677 {
1678 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1679 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1680 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1681 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1682 {
1683 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1684 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1685 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1686 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1687 uCurVector, pVCpu->cpum.GstCtx.cr2));
1688 }
1689 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1690 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1691 {
1692 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1693 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1694 }
1695 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1696 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1697 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1698 {
1699 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1700 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1701 }
1702 }
1703 else
1704 {
1705 if (uPrevVector == X86_XCPT_NMI)
1706 {
1707 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1708 if (uCurVector == X86_XCPT_PF)
1709 {
1710 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1711 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1712 }
1713 }
1714 else if ( uPrevVector == X86_XCPT_AC
1715 && uCurVector == X86_XCPT_AC)
1716 {
1717 enmRaise = IEMXCPTRAISE_CPU_HANG;
1718 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1719 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1720 }
1721 }
1722 }
1723 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1724 {
1725 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1726 if (uCurVector == X86_XCPT_PF)
1727 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1728 }
1729 else
1730 {
1731 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1732 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1733 }
1734
1735 if (pfXcptRaiseInfo)
1736 *pfXcptRaiseInfo = fRaiseInfo;
1737 return enmRaise;
1738}
1739
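/* Illustrative call with hypothetical inputs - a #GP raised while delivering a #PF:
       IEMXCPTRAISEINFO fInfo  = IEMXCPTRAISEINFO_NONE;
       IEMXCPTRAISE     enmHow = IEMEvaluateRecursiveXcpt(pVCpu,
                                                          IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
                                                          IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
                                                          &fInfo);
   yields IEMXCPTRAISE_DOUBLE_FAULT with fInfo = IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT, while
   the reverse order (#PF raised while delivering a #GP) is handled serially and yields
   IEMXCPTRAISE_CURRENT_XCPT. */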
1740
1741/**
1742 * Enters the CPU shutdown state initiated by a triple fault or other
1743 * unrecoverable conditions.
1744 *
1745 * @returns Strict VBox status code.
1746 * @param pVCpu The cross context virtual CPU structure of the
1747 * calling thread.
1748 */
1749static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1750{
1751 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1752 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1753
1754 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1755 {
1756 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1757 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1758 }
1759
1760 RT_NOREF(pVCpu);
1761 return VINF_EM_TRIPLE_FAULT;
1762}
1763
1764
1765/**
1766 * Validates a new SS segment.
1767 *
1768 * @returns VBox strict status code.
1769 * @param pVCpu The cross context virtual CPU structure of the
1770 * calling thread.
1771 * @param NewSS The new SS selector.
1772 * @param uCpl The CPL to load the stack for.
1773 * @param pDesc Where to return the descriptor.
1774 */
1775static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1776{
1777 /* Null selectors are not allowed (we're not called for dispatching
1778 interrupts with SS=0 in long mode). */
1779 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1780 {
1781 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1782 return iemRaiseTaskSwitchFault0(pVCpu);
1783 }
1784
1785 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1786 if ((NewSS & X86_SEL_RPL) != uCpl)
1787 {
1788 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1789 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1790 }
1791
1792 /*
1793 * Read the descriptor.
1794 */
1795 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1796 if (rcStrict != VINF_SUCCESS)
1797 return rcStrict;
1798
1799 /*
1800 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1801 */
1802 if (!pDesc->Legacy.Gen.u1DescType)
1803 {
1804 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1805 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1806 }
1807
1808 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1809 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1810 {
1811 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1812 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1813 }
1814 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1815 {
1816 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1817 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1818 }
1819
1820 /* Is it there? */
1821 /** @todo testcase: Is this checked before the canonical / limit check below? */
1822 if (!pDesc->Legacy.Gen.u1Present)
1823 {
1824 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1825 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1826 }
1827
1828 return VINF_SUCCESS;
1829}
1830
1831/** @} */
1832
1833
1834/** @name Raising Exceptions.
1835 *
1836 * @{
1837 */
1838
1839
1840/**
1841 * Loads the specified stack far pointer from the TSS.
1842 *
1843 * @returns VBox strict status code.
1844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1845 * @param uCpl The CPL to load the stack for.
1846 * @param pSelSS Where to return the new stack segment.
1847 * @param puEsp Where to return the new stack pointer.
1848 */
1849static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1850{
1851 VBOXSTRICTRC rcStrict;
1852 Assert(uCpl < 4);
1853
1854 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1855 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1856 {
1857 /*
1858 * 16-bit TSS (X86TSS16).
1859 */
1860 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1861 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1862 {
1863 uint32_t off = uCpl * 4 + 2;
1864 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1865 {
1866 /** @todo check actual access pattern here. */
1867 uint32_t u32Tmp = 0; /* (Zero init to quiet gcc's maybe-uninitialized warning.) */
1868 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1869 if (rcStrict == VINF_SUCCESS)
1870 {
1871 *puEsp = RT_LOWORD(u32Tmp);
1872 *pSelSS = RT_HIWORD(u32Tmp);
1873 return VINF_SUCCESS;
1874 }
1875 }
1876 else
1877 {
1878 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1879 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1880 }
1881 break;
1882 }
1883
1884 /*
1885 * 32-bit TSS (X86TSS32).
1886 */
1887 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1888 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1889 {
1890 uint32_t off = uCpl * 8 + 4;
1891 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1892 {
1893/** @todo check actual access pattern here. */
1894 uint64_t u64Tmp;
1895 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1896 if (rcStrict == VINF_SUCCESS)
1897 {
1898 *puEsp = u64Tmp & UINT32_MAX;
1899 *pSelSS = (RTSEL)(u64Tmp >> 32);
1900 return VINF_SUCCESS;
1901 }
1902 }
1903 else
1904 {
1905 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1906 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1907 }
1908 break;
1909 }
1910
1911 default:
1912 AssertFailed();
1913 rcStrict = VERR_IEM_IPE_4;
1914 break;
1915 }
1916
1917 *puEsp = 0; /* make gcc happy */
1918 *pSelSS = 0; /* make gcc happy */
1919 return rcStrict;
1920}
1921
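/* Worked example (illustrative): for uCpl=1 on a 32-bit TSS the qword fetched above comes
   from TR.base + 1*8 + 4, i.e. the X86TSS32 esp1/ss1 pair, split as ESP = low dword and
   SS = low word of the high dword; the 16-bit variant reads the dword at TR.base + 1*4 + 2,
   i.e. the X86TSS16 sp1/ss1 pair. */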
1922
1923/**
1924 * Loads the specified stack pointer from the 64-bit TSS.
1925 *
1926 * @returns VBox strict status code.
1927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1928 * @param uCpl The CPL to load the stack for.
1929 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1930 * @param puRsp Where to return the new stack pointer.
1931 */
1932static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1933{
1934 Assert(uCpl < 4);
1935 Assert(uIst < 8);
1936 *puRsp = 0; /* make gcc happy */
1937
1938 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1939 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1940
1941 uint32_t off;
1942 if (uIst)
1943 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1944 else
1945 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1946 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1947 {
1948 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1949 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1950 }
1951
1952 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1953}
1954
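/* Worked example (illustrative): with uIst=3 the fetch above reads X86TSS64::ist3
   (off = 2*8 + offsetof ist1); with uIst=0 and uCpl=2 it reads X86TSS64::rsp2
   (off = 2*8 + offsetof rsp0). Only the IST path ignores the CPL. */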
1955
1956/**
1957 * Adjust the CPU state according to the exception being raised.
1958 *
1959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1960 * @param u8Vector The exception that has been raised.
1961 */
1962DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1963{
1964 switch (u8Vector)
1965 {
1966 case X86_XCPT_DB:
1967 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1968 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1969 break;
1970 /** @todo Read the AMD and Intel exception reference... */
1971 }
1972}
1973
1974
1975/**
1976 * Implements exceptions and interrupts for real mode.
1977 *
1978 * @returns VBox strict status code.
1979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1980 * @param cbInstr The number of bytes to offset rIP by in the return
1981 * address.
1982 * @param u8Vector The interrupt / exception vector number.
1983 * @param fFlags The flags.
1984 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1985 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1986 */
1987static VBOXSTRICTRC
1988iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1989 uint8_t cbInstr,
1990 uint8_t u8Vector,
1991 uint32_t fFlags,
1992 uint16_t uErr,
1993 uint64_t uCr2) RT_NOEXCEPT
1994{
1995 NOREF(uErr); NOREF(uCr2);
1996 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1997
1998 /*
1999 * Read the IDT entry.
2000 */
2001 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2002 {
2003 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2004 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2005 }
2006 RTFAR16 Idte;
2007 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2008 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2009 {
2010 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2011 return rcStrict;
2012 }
2013
2014 /*
2015 * Push the stack frame.
2016 */
2017 uint16_t *pu16Frame;
2018 uint64_t uNewRsp;
2019 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2020 if (rcStrict != VINF_SUCCESS)
2021 return rcStrict;
2022
2023 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2024#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2025 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2026 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2027 fEfl |= UINT16_C(0xf000);
2028#endif
2029 pu16Frame[2] = (uint16_t)fEfl;
2030 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2031 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2032 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2033 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2034 return rcStrict;
2035
2036 /*
2037 * Load the vector address into cs:ip and make exception specific state
2038 * adjustments.
2039 */
2040 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2041 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2042 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2043 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2044 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2045 pVCpu->cpum.GstCtx.rip = Idte.off;
2046 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2047 IEMMISC_SET_EFL(pVCpu, fEfl);
2048
2049 /** @todo do we actually do this in real mode? */
2050 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2051 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2052
2053 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2054}
2055
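/* Worked example (illustrative): for u8Vector=0x21 (a software INT 21h) the code above
   fetches the 4-byte IVT entry at IDTR.base + 0x21*4, pushes FLAGS, CS and the return IP
   (IP + cbInstr for software interrupts), then loads CS:IP from the entry with
   CS.base = selector << 4 and clears IF, TF and AC. */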
2056
2057/**
2058 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2059 *
2060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2061 * @param pSReg Pointer to the segment register.
2062 */
2063DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2064{
2065 pSReg->Sel = 0;
2066 pSReg->ValidSel = 0;
2067 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2068 {
2069 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2070 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2071 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2072 }
2073 else
2074 {
2075 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2076 /** @todo check this on AMD-V */
2077 pSReg->u64Base = 0;
2078 pSReg->u32Limit = 0;
2079 }
2080}
2081
2082
2083/**
2084 * Loads a segment selector during a task switch in V8086 mode.
2085 *
2086 * @param pSReg Pointer to the segment register.
2087 * @param uSel The selector value to load.
2088 */
2089DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2090{
2091 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2092 pSReg->Sel = uSel;
2093 pSReg->ValidSel = uSel;
2094 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2095 pSReg->u64Base = uSel << 4;
2096 pSReg->u32Limit = 0xffff;
2097 pSReg->Attr.u = 0xf3;
2098}
2099
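/* Worked example (illustrative): loading selector 0x1234 this way gives base 0x12340,
   limit 0xffff and attributes 0xf3, i.e. a present, DPL=3, accessed read/write data
   segment as required by the guest segment register checks referenced above. */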
2100
2101/**
2102 * Loads a segment selector during a task switch in protected mode.
2103 *
2104 * In this task switch scenario, we would throw \#TS exceptions rather than
2105 * \#GPs.
2106 *
2107 * @returns VBox strict status code.
2108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2109 * @param pSReg Pointer to the segment register.
2110 * @param uSel The new selector value.
2111 *
2112 * @remarks This does _not_ handle CS or SS.
2113 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
2114 */
2115static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2116{
2117 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2118
2119 /* Null data selector. */
2120 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2121 {
2122 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2124 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2125 return VINF_SUCCESS;
2126 }
2127
2128 /* Fetch the descriptor. */
2129 IEMSELDESC Desc;
2130 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2131 if (rcStrict != VINF_SUCCESS)
2132 {
2133 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2134 VBOXSTRICTRC_VAL(rcStrict)));
2135 return rcStrict;
2136 }
2137
2138 /* Must be a data segment or readable code segment. */
2139 if ( !Desc.Legacy.Gen.u1DescType
2140 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2141 {
2142 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2143 Desc.Legacy.Gen.u4Type));
2144 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2145 }
2146
2147 /* Check privileges for data segments and non-conforming code segments. */
2148 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2149 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2150 {
2151 /* The RPL and the new CPL must be less than or equal to the DPL. */
2152 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2153 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2154 {
2155 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2156 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2157 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2158 }
2159 }
2160
2161 /* Is it there? */
2162 if (!Desc.Legacy.Gen.u1Present)
2163 {
2164 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2165 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2166 }
2167
2168 /* The base and limit. */
2169 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2170 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2171
2172 /*
2173 * Ok, everything checked out fine. Now set the accessed bit before
2174 * committing the result into the registers.
2175 */
2176 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2177 {
2178 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2179 if (rcStrict != VINF_SUCCESS)
2180 return rcStrict;
2181 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2182 }
2183
2184 /* Commit */
2185 pSReg->Sel = uSel;
2186 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2187 pSReg->u32Limit = cbLimit;
2188 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2189 pSReg->ValidSel = uSel;
2190 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2191 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2192 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2193
2194 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2195 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2196 return VINF_SUCCESS;
2197}
2198
2199
2200/**
2201 * Performs a task switch.
2202 *
2203 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2204 * caller is responsible for performing the necessary checks (like DPL, TSS
2205 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2206 * reference for JMP, CALL, IRET.
2207 *
2208 * If the task switch is due to a software interrupt or hardware exception,
2209 * the caller is responsible for validating the TSS selector and descriptor. See
2210 * Intel Instruction reference for INT n.
2211 *
2212 * @returns VBox strict status code.
2213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2214 * @param enmTaskSwitch The cause of the task switch.
2215 * @param uNextEip The EIP effective after the task switch.
2216 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2217 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2218 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2219 * @param SelTSS The TSS selector of the new task.
2220 * @param pNewDescTSS Pointer to the new TSS descriptor.
2221 */
2222VBOXSTRICTRC
2223iemTaskSwitch(PVMCPUCC pVCpu,
2224 IEMTASKSWITCH enmTaskSwitch,
2225 uint32_t uNextEip,
2226 uint32_t fFlags,
2227 uint16_t uErr,
2228 uint64_t uCr2,
2229 RTSEL SelTSS,
2230 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2231{
2232 Assert(!IEM_IS_REAL_MODE(pVCpu));
2233 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2234 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2235
2236 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2237 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2238 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2239 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2240 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2241
2242 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2243 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2244
2245 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2246 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2247
2248 /* Update CR2 in case it's a page-fault. */
2249 /** @todo This should probably be done much earlier in IEM/PGM. See
2250 * @bugref{5653#c49}. */
2251 if (fFlags & IEM_XCPT_FLAGS_CR2)
2252 pVCpu->cpum.GstCtx.cr2 = uCr2;
2253
2254 /*
2255 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2256 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2257 */
2258 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2259 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2260 if (uNewTSSLimit < uNewTSSLimitMin)
2261 {
2262 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2263 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2264 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2265 }
2266
2267 /*
2268 * Task switches in VMX non-root mode always cause task switches.
2269 * The new TSS must have been read and validated (DPL, limits etc.) before a
2270 * task-switch VM-exit commences.
2271 *
2272 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2273 */
2274 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2275 {
2276 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2277 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2278 }
2279
2280 /*
2281 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2282 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2283 */
2284 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2285 {
2286 uint64_t const uExitInfo1 = SelTSS;
2287 uint64_t uExitInfo2 = uErr;
2288 switch (enmTaskSwitch)
2289 {
2290 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2291 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2292 default: break;
2293 }
2294 if (fFlags & IEM_XCPT_FLAGS_ERR)
2295 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2296 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2297 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2298
2299 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2300 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2301 RT_NOREF2(uExitInfo1, uExitInfo2);
2302 }
2303
2304 /*
2305 * Check the current TSS limit. The last written byte to the current TSS during the
2306 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2307 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2308 *
2309 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2310 * end up with smaller than "legal" TSS limits.
2311 */
2312 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2313 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2314 if (uCurTSSLimit < uCurTSSLimitMin)
2315 {
2316 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2317 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2318 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2319 }
2320
2321 /*
2322 * Verify that the new TSS can be accessed and map it. Map only the required contents
2323 * and not the entire TSS.
2324 */
2325 void *pvNewTSS;
2326 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2327 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2328 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2329 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2330 * not perform correct translation if this happens. See Intel spec. 7.2.1
2331 * "Task-State Segment". */
2332 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2333 if (rcStrict != VINF_SUCCESS)
2334 {
2335 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2336 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2337 return rcStrict;
2338 }
2339
2340 /*
2341 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2342 */
2343 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2344 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2345 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2346 {
2347 PX86DESC pDescCurTSS;
2348 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2349 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2350 if (rcStrict != VINF_SUCCESS)
2351 {
2352 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2353 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2354 return rcStrict;
2355 }
2356
2357 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2358 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2359 if (rcStrict != VINF_SUCCESS)
2360 {
2361 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2362 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2363 return rcStrict;
2364 }
2365
2366 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2367 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2368 {
2369 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2370 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2371 fEFlags &= ~X86_EFL_NT;
2372 }
2373 }
2374
2375 /*
2376 * Save the CPU state into the current TSS.
2377 */
2378 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2379 if (GCPtrNewTSS == GCPtrCurTSS)
2380 {
2381 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2382 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2383 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2384 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2385 pVCpu->cpum.GstCtx.ldtr.Sel));
2386 }
2387 if (fIsNewTSS386)
2388 {
2389 /*
2390 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2391 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2392 */
2393 void *pvCurTSS32;
2394 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2395 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2396 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2397 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2398 if (rcStrict != VINF_SUCCESS)
2399 {
2400 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2401 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2402 return rcStrict;
2403 }
2404
2405 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTSS..cbCurTSS). */
2406 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2407 pCurTSS32->eip = uNextEip;
2408 pCurTSS32->eflags = fEFlags;
2409 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2410 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2411 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2412 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2413 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2414 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2415 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2416 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2417 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2418 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2419 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2420 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2421 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2422 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2423
2424 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2425 if (rcStrict != VINF_SUCCESS)
2426 {
2427 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2428 VBOXSTRICTRC_VAL(rcStrict)));
2429 return rcStrict;
2430 }
2431 }
2432 else
2433 {
2434 /*
2435 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2436 */
2437 void *pvCurTSS16;
2438 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2439 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2440 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2441 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2442 if (rcStrict != VINF_SUCCESS)
2443 {
2444 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2445 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2446 return rcStrict;
2447 }
2448
2449 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTSS..cbCurTSS). */
2450 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2451 pCurTSS16->ip = uNextEip;
2452 pCurTSS16->flags = (uint16_t)fEFlags;
2453 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2454 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2455 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2456 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2457 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2458 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2459 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2460 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2461 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2462 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2463 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2464 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2465
2466 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2467 if (rcStrict != VINF_SUCCESS)
2468 {
2469 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2470 VBOXSTRICTRC_VAL(rcStrict)));
2471 return rcStrict;
2472 }
2473 }
2474
2475 /*
2476 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2477 */
2478 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2479 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2480 {
2481 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2482 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2483 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2484 }
2485
2486 /*
2487 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2488 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2489 */
2490 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2491 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2492 bool fNewDebugTrap;
2493 if (fIsNewTSS386)
2494 {
2495 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2496 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2497 uNewEip = pNewTSS32->eip;
2498 uNewEflags = pNewTSS32->eflags;
2499 uNewEax = pNewTSS32->eax;
2500 uNewEcx = pNewTSS32->ecx;
2501 uNewEdx = pNewTSS32->edx;
2502 uNewEbx = pNewTSS32->ebx;
2503 uNewEsp = pNewTSS32->esp;
2504 uNewEbp = pNewTSS32->ebp;
2505 uNewEsi = pNewTSS32->esi;
2506 uNewEdi = pNewTSS32->edi;
2507 uNewES = pNewTSS32->es;
2508 uNewCS = pNewTSS32->cs;
2509 uNewSS = pNewTSS32->ss;
2510 uNewDS = pNewTSS32->ds;
2511 uNewFS = pNewTSS32->fs;
2512 uNewGS = pNewTSS32->gs;
2513 uNewLdt = pNewTSS32->selLdt;
2514 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2515 }
2516 else
2517 {
2518 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2519 uNewCr3 = 0;
2520 uNewEip = pNewTSS16->ip;
2521 uNewEflags = pNewTSS16->flags;
2522 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2523 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2524 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2525 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2526 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2527 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2528 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2529 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2530 uNewES = pNewTSS16->es;
2531 uNewCS = pNewTSS16->cs;
2532 uNewSS = pNewTSS16->ss;
2533 uNewDS = pNewTSS16->ds;
2534 uNewFS = 0;
2535 uNewGS = 0;
2536 uNewLdt = pNewTSS16->selLdt;
2537 fNewDebugTrap = false;
2538 }
2539
2540 if (GCPtrNewTSS == GCPtrCurTSS)
2541 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2542 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2543
2544 /*
2545 * We're done accessing the new TSS.
2546 */
2547 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2548 if (rcStrict != VINF_SUCCESS)
2549 {
2550 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2551 return rcStrict;
2552 }
2553
2554 /*
2555 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2556 */
2557 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2558 {
2559 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2560 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2561 if (rcStrict != VINF_SUCCESS)
2562 {
2563 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2564 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2565 return rcStrict;
2566 }
2567
2568 /* Check that the descriptor indicates the new TSS is available (not busy). */
2569 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2570 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2571 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2572
2573 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2574 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2575 if (rcStrict != VINF_SUCCESS)
2576 {
2577 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2578 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2579 return rcStrict;
2580 }
2581 }
2582
2583 /*
2584 * From this point on, we're technically in the new task. We will defer exceptions
2585 * until the completion of the task switch but before executing any instructions in the new task.
2586 */
2587 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2588 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2589 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2590 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2591 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2592 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2593 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2594
2595 /* Set the busy bit in TR. */
2596 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2597
2598 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2599 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2600 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2601 {
2602 uNewEflags |= X86_EFL_NT;
2603 }
2604
2605 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2606 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2607 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2608
2609 pVCpu->cpum.GstCtx.eip = uNewEip;
2610 pVCpu->cpum.GstCtx.eax = uNewEax;
2611 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2612 pVCpu->cpum.GstCtx.edx = uNewEdx;
2613 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2614 pVCpu->cpum.GstCtx.esp = uNewEsp;
2615 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2616 pVCpu->cpum.GstCtx.esi = uNewEsi;
2617 pVCpu->cpum.GstCtx.edi = uNewEdi;
2618
2619 uNewEflags &= X86_EFL_LIVE_MASK;
2620 uNewEflags |= X86_EFL_RA1_MASK;
2621 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2622
2623 /*
2624 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2625 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2626 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2627 */
2628 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2629 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2630
2631 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2632 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2633
2634 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2635 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2636
2637 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2638 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2639
2640 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2641 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2642
2643 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2644 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2645 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2646
2647 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2648 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2649 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2650 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2651
2652 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2653 {
2654 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2655 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2656 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2657 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2658 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2659 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2660 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2661 }
2662
2663 /*
2664 * Switch CR3 for the new task.
2665 */
2666 if ( fIsNewTSS386
2667 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2668 {
2669 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2670 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2671 AssertRCSuccessReturn(rc, rc);
2672
2673 /* Inform PGM. */
2674 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2675 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2676 AssertRCReturn(rc, rc);
2677 /* ignore informational status codes */
2678
2679 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2680 }
2681
2682 /*
2683 * Switch LDTR for the new task.
2684 */
2685 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2686 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2687 else
2688 {
2689 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2690
2691 IEMSELDESC DescNewLdt;
2692 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2693 if (rcStrict != VINF_SUCCESS)
2694 {
2695 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2696 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2697 return rcStrict;
2698 }
2699 if ( !DescNewLdt.Legacy.Gen.u1Present
2700 || DescNewLdt.Legacy.Gen.u1DescType
2701 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2702 {
2703 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2704 uNewLdt, DescNewLdt.Legacy.u));
2705 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2706 }
2707
2708 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2709 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2710 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2711 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2712 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2713 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2714 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2715 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2716 }
2717
2718 IEMSELDESC DescSS;
2719 if (IEM_IS_V86_MODE(pVCpu))
2720 {
2721 pVCpu->iem.s.uCpl = 3;
2722 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2723 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2724 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2725 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2726 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2727 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2728
2729 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2730 DescSS.Legacy.u = 0;
2731 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2732 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2733 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2734 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2735 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2736 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2737 DescSS.Legacy.Gen.u2Dpl = 3;
2738 }
2739 else
2740 {
2741 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2742
2743 /*
2744 * Load the stack segment for the new task.
2745 */
2746 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2747 {
2748 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2749 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2750 }
2751
2752 /* Fetch the descriptor. */
2753 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2754 if (rcStrict != VINF_SUCCESS)
2755 {
2756 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2757 VBOXSTRICTRC_VAL(rcStrict)));
2758 return rcStrict;
2759 }
2760
2761 /* SS must be a data segment and writable. */
2762 if ( !DescSS.Legacy.Gen.u1DescType
2763 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2764 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2765 {
2766 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2767 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2768 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2769 }
2770
2771 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2772 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2773 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2774 {
2775 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2776 uNewCpl));
2777 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2778 }
2779
2780 /* Is it there? */
2781 if (!DescSS.Legacy.Gen.u1Present)
2782 {
2783 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2784 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2785 }
2786
2787 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2788 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2789
2790 /* Set the accessed bit before committing the result into SS. */
2791 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2792 {
2793 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2794 if (rcStrict != VINF_SUCCESS)
2795 return rcStrict;
2796 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2797 }
2798
2799 /* Commit SS. */
2800 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2801 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2802 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2803 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2804 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2805 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2807
2808 /* CPL has changed, update IEM before loading rest of segments. */
2809 pVCpu->iem.s.uCpl = uNewCpl;
2810
2811 /*
2812 * Load the data segments for the new task.
2813 */
2814 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2815 if (rcStrict != VINF_SUCCESS)
2816 return rcStrict;
2817 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2818 if (rcStrict != VINF_SUCCESS)
2819 return rcStrict;
2820 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2821 if (rcStrict != VINF_SUCCESS)
2822 return rcStrict;
2823 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2824 if (rcStrict != VINF_SUCCESS)
2825 return rcStrict;
2826
2827 /*
2828 * Load the code segment for the new task.
2829 */
2830 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2831 {
2832 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2833 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2834 }
2835
2836 /* Fetch the descriptor. */
2837 IEMSELDESC DescCS;
2838 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2839 if (rcStrict != VINF_SUCCESS)
2840 {
2841 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2842 return rcStrict;
2843 }
2844
2845 /* CS must be a code segment. */
2846 if ( !DescCS.Legacy.Gen.u1DescType
2847 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2848 {
2849 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2850 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2851 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2852 }
2853
2854 /* For conforming CS, DPL must be less than or equal to the RPL. */
2855 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2856 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2857 {
2858 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2859 DescCS.Legacy.Gen.u2Dpl));
2860 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2861 }
2862
2863 /* For non-conforming CS, DPL must match RPL. */
2864 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2865 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2866 {
2867 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2868 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2869 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2870 }
2871
2872 /* Is it there? */
2873 if (!DescCS.Legacy.Gen.u1Present)
2874 {
2875 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2876 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2877 }
2878
2879 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2880 u64Base = X86DESC_BASE(&DescCS.Legacy);
2881
2882 /* Set the accessed bit before committing the result into CS. */
2883 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2884 {
2885 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2886 if (rcStrict != VINF_SUCCESS)
2887 return rcStrict;
2888 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2889 }
2890
2891 /* Commit CS. */
2892 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2893 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2894 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2895 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2896 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2897 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2898 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2899 }
2900
2901 /** @todo Debug trap. */
2902 if (fIsNewTSS386 && fNewDebugTrap)
2903 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2904
2905 /*
2906 * Construct the error code masks based on what caused this task switch.
2907 * See Intel Instruction reference for INT.
2908 */
2909 uint16_t uExt;
2910 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2911 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2912 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2913 {
2914 uExt = 1;
2915 }
2916 else
2917 uExt = 0;
2918
2919 /*
2920 * Push any error code on to the new stack.
2921 */
2922 if (fFlags & IEM_XCPT_FLAGS_ERR)
2923 {
2924 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2925 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2926 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2927
2928 /* Check that there is sufficient space on the stack. */
2929 /** @todo Factor out segment limit checking for normal/expand down segments
2930 * into a separate function. */
2931 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2932 {
2933 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2934 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2935 {
2936 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2937 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2938 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2939 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2940 }
2941 }
2942 else
2943 {
2944 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2945 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2946 {
2947 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2948 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2949 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2950 }
2951 }
2952
2953
2954 if (fIsNewTSS386)
2955 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2956 else
2957 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2958 if (rcStrict != VINF_SUCCESS)
2959 {
2960 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2961 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2962 return rcStrict;
2963 }
2964 }
2965
2966 /* Check the new EIP against the new CS limit. */
2967 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2968 {
2969 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2970 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2971 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2972 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2973 }
2974
2975 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2976 pVCpu->cpum.GstCtx.ss.Sel));
2977 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2978}
2979
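/* Illustrative note: a hardware exception dispatched through an IDT task gate reaches this
   function with enmTaskSwitch=IEMTASKSWITCH_INT_XCPT and no IEM_XCPT_FLAGS_T_SOFT_INT, so
   uExt above is 1 and the #SS/#GP raised if the new stack or new EIP checks fail carry an
   error code with the EXT bit set, as the Intel SDM prescribes for external events. */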
2980
2981/**
2982 * Implements exceptions and interrupts for protected mode.
2983 *
2984 * @returns VBox strict status code.
2985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2986 * @param cbInstr The number of bytes to offset rIP by in the return
2987 * address.
2988 * @param u8Vector The interrupt / exception vector number.
2989 * @param fFlags The flags.
2990 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2991 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2992 */
2993static VBOXSTRICTRC
2994iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2995 uint8_t cbInstr,
2996 uint8_t u8Vector,
2997 uint32_t fFlags,
2998 uint16_t uErr,
2999 uint64_t uCr2) RT_NOEXCEPT
3000{
3001 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3002
3003 /*
3004 * Read the IDT entry.
3005 */
3006 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3007 {
3008 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3009 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3010 }
3011 X86DESC Idte;
3012 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3013 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3014 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3015 {
3016 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3017 return rcStrict;
3018 }
3019 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3020 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3021 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3022
3023 /*
3024 * Check the descriptor type, DPL and such.
3025 * ASSUMES this is done in the same order as described for call-gate calls.
3026 */
3027 if (Idte.Gate.u1DescType)
3028 {
3029 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3030 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3031 }
3032 bool fTaskGate = false;
3033 uint8_t f32BitGate = true;
3034 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
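    /* Note: for interrupt gates IF is added to this mask in the switch below,
       so further maskable interrupts stay blocked until the handler re-enables
       them; trap gates leave IF untouched.  That is the architectural
       difference between the two gate types. */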
3035 switch (Idte.Gate.u4Type)
3036 {
3037 case X86_SEL_TYPE_SYS_UNDEFINED:
3038 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3039 case X86_SEL_TYPE_SYS_LDT:
3040 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3041 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3042 case X86_SEL_TYPE_SYS_UNDEFINED2:
3043 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3044 case X86_SEL_TYPE_SYS_UNDEFINED3:
3045 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3046 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3047 case X86_SEL_TYPE_SYS_UNDEFINED4:
3048 {
3049 /** @todo check what actually happens when the type is wrong...
3050 * esp. call gates. */
3051 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3052 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3053 }
3054
3055 case X86_SEL_TYPE_SYS_286_INT_GATE:
3056 f32BitGate = false;
3057 RT_FALL_THRU();
3058 case X86_SEL_TYPE_SYS_386_INT_GATE:
3059 fEflToClear |= X86_EFL_IF;
3060 break;
3061
3062 case X86_SEL_TYPE_SYS_TASK_GATE:
3063 fTaskGate = true;
3064#ifndef IEM_IMPLEMENTS_TASKSWITCH
3065 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3066#endif
3067 break;
3068
3069 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3070            f32BitGate = false;
                RT_FALL_THRU();
3071 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3072 break;
3073
3074 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3075 }
3076
3077 /* Check DPL against CPL if applicable. */
3078 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3079 {
3080 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3081 {
3082 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3083 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3084 }
3085 }
3086
3087 /* Is it there? */
3088 if (!Idte.Gate.u1Present)
3089 {
3090 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3091 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3092 }
3093
3094 /* Is it a task-gate? */
3095 if (fTaskGate)
3096 {
3097 /*
3098 * Construct the error code masks based on what caused this task switch.
3099 * See Intel Instruction reference for INT.
3100 */
3101 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3102 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3103 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3104 RTSEL SelTSS = Idte.Gate.u16Sel;
3105
3106 /*
3107 * Fetch the TSS descriptor in the GDT.
3108 */
3109 IEMSELDESC DescTSS;
3110 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3111 if (rcStrict != VINF_SUCCESS)
3112 {
3113 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3114 VBOXSTRICTRC_VAL(rcStrict)));
3115 return rcStrict;
3116 }
3117
3118 /* The TSS descriptor must be a system segment and be available (not busy). */
3119 if ( DescTSS.Legacy.Gen.u1DescType
3120 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3121 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3122 {
3123 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3124 u8Vector, SelTSS, DescTSS.Legacy.au64));
3125 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3126 }
3127
3128 /* The TSS must be present. */
3129 if (!DescTSS.Legacy.Gen.u1Present)
3130 {
3131 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3132 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3133 }
3134
3135 /* Do the actual task switch. */
3136 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3137 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3138 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3139 }
3140
3141 /* A null CS is bad. */
3142 RTSEL NewCS = Idte.Gate.u16Sel;
3143 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3144 {
3145 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3146 return iemRaiseGeneralProtectionFault0(pVCpu);
3147 }
3148
3149 /* Fetch the descriptor for the new CS. */
3150 IEMSELDESC DescCS;
3151 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3152 if (rcStrict != VINF_SUCCESS)
3153 {
3154 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3155 return rcStrict;
3156 }
3157
3158 /* Must be a code segment. */
3159 if (!DescCS.Legacy.Gen.u1DescType)
3160 {
3161 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3162 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3163 }
3164 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3165 {
3166 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3167 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3168 }
3169
3170 /* Don't allow lowering the privilege level. */
3171 /** @todo Does the lowering of privileges apply to software interrupts
3172 * only? This has bearings on the more-privileged or
3173 * same-privilege stack behavior further down. A testcase would
3174 * be nice. */
3175 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3176 {
3177 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3178 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3179 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3180 }
3181
3182 /* Make sure the selector is present. */
3183 if (!DescCS.Legacy.Gen.u1Present)
3184 {
3185 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3186 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3187 }
3188
3189 /* Check the new EIP against the new CS limit. */
3190 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3191 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3192 ? Idte.Gate.u16OffsetLow
3193 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3194 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3195 if (uNewEip > cbLimitCS)
3196 {
3197 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3198 u8Vector, uNewEip, cbLimitCS, NewCS));
3199 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3200 }
3201 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3202
3203 /* Calc the flag image to push. */
3204 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3205 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3206 fEfl &= ~X86_EFL_RF;
3207 else
3208 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3209
3210 /* From V8086 mode only go to CPL 0. */
3211 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3212 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3213 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3214 {
3215 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3216 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3217 }
3218
3219 /*
3220 * If the privilege level changes, we need to get a new stack from the TSS.
3221     * This in turn means validating the new SS and ESP...
3222 */
3223 if (uNewCpl != pVCpu->iem.s.uCpl)
3224 {
3225 RTSEL NewSS;
3226 uint32_t uNewEsp;
3227 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3228 if (rcStrict != VINF_SUCCESS)
3229 return rcStrict;
3230
3231 IEMSELDESC DescSS;
3232 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3233 if (rcStrict != VINF_SUCCESS)
3234 return rcStrict;
3235 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3236 if (!DescSS.Legacy.Gen.u1DefBig)
3237 {
3238 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3239 uNewEsp = (uint16_t)uNewEsp;
3240 }
3241
3242 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3243
3244 /* Check that there is sufficient space for the stack frame. */
3245 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3246 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3247 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3248 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
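        /* For reference, the expression above works out to:
             16-bit gate: 10 bytes (IP, CS, FLAGS, SP, SS), 12 with an error code;
             32-bit gate: 20 bytes (EIP, CS, EFLAGS, ESP, SS), 24 with an error code;
           and when interrupting V8086 code ES, DS, FS and GS are pushed as well,
           giving 18/20 bytes for 16-bit gates and 36/40 bytes for 32-bit ones. */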
3249
3250 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3251 {
3252 if ( uNewEsp - 1 > cbLimitSS
3253 || uNewEsp < cbStackFrame)
3254 {
3255 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3256 u8Vector, NewSS, uNewEsp, cbStackFrame));
3257 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3258 }
3259 }
3260 else
3261 {
3262 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3263 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3264 {
3265 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3266 u8Vector, NewSS, uNewEsp, cbStackFrame));
3267 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3268 }
3269 }
3270
3271 /*
3272 * Start making changes.
3273 */
3274
3275 /* Set the new CPL so that stack accesses use it. */
3276 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3277 pVCpu->iem.s.uCpl = uNewCpl;
3278
3279 /* Create the stack frame. */
3280 RTPTRUNION uStackFrame;
3281 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3282 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3283 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3284 if (rcStrict != VINF_SUCCESS)
3285 return rcStrict;
3286 void * const pvStackFrame = uStackFrame.pv;
3287 if (f32BitGate)
3288 {
3289 if (fFlags & IEM_XCPT_FLAGS_ERR)
3290 *uStackFrame.pu32++ = uErr;
3291 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3292 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3293 uStackFrame.pu32[2] = fEfl;
3294 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3295 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3296 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3297 if (fEfl & X86_EFL_VM)
3298 {
3299 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3300 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3301 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3302 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3303 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3304 }
3305 }
3306 else
3307 {
3308 if (fFlags & IEM_XCPT_FLAGS_ERR)
3309 *uStackFrame.pu16++ = uErr;
3310 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3311 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3312 uStackFrame.pu16[2] = fEfl;
3313 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3314 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3315 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3316 if (fEfl & X86_EFL_VM)
3317 {
3318 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3319 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3320 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3321 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3322 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3323 }
3324 }
3325 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3326 if (rcStrict != VINF_SUCCESS)
3327 return rcStrict;
3328
3329 /* Mark the selectors 'accessed' (hope this is the correct time). */
3330        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3331 * after pushing the stack frame? (Write protect the gdt + stack to
3332 * find out.) */
3333 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3334 {
3335 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3336 if (rcStrict != VINF_SUCCESS)
3337 return rcStrict;
3338 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3339 }
3340
3341 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3342 {
3343 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3344 if (rcStrict != VINF_SUCCESS)
3345 return rcStrict;
3346 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3347 }
3348
3349 /*
3350         * Start committing the register changes (joins with the DPL=CPL branch).
3351 */
3352 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3353 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3354 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3355 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3356 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3357 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3358 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3359 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3360 * SP is loaded).
3361 * Need to check the other combinations too:
3362 * - 16-bit TSS, 32-bit handler
3363 * - 32-bit TSS, 16-bit handler */
3364 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3365 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3366 else
3367 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3368
3369 if (fEfl & X86_EFL_VM)
3370 {
3371 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3372 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3373 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3374 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3375 }
3376 }
3377 /*
3378 * Same privilege, no stack change and smaller stack frame.
3379 */
3380 else
3381 {
3382 uint64_t uNewRsp;
3383 RTPTRUNION uStackFrame;
3384 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
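        /* I.e. 6 bytes (IP, CS, FLAGS), or 8 with an error code, for a 16-bit gate;
           12 or 16 bytes respectively for a 32-bit gate. */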
3385 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3386 if (rcStrict != VINF_SUCCESS)
3387 return rcStrict;
3388 void * const pvStackFrame = uStackFrame.pv;
3389
3390 if (f32BitGate)
3391 {
3392 if (fFlags & IEM_XCPT_FLAGS_ERR)
3393 *uStackFrame.pu32++ = uErr;
3394 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3395 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3396 uStackFrame.pu32[2] = fEfl;
3397 }
3398 else
3399 {
3400 if (fFlags & IEM_XCPT_FLAGS_ERR)
3401 *uStackFrame.pu16++ = uErr;
3402 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3403 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3404 uStackFrame.pu16[2] = fEfl;
3405 }
3406 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3407 if (rcStrict != VINF_SUCCESS)
3408 return rcStrict;
3409
3410 /* Mark the CS selector as 'accessed'. */
3411 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3412 {
3413 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3414 if (rcStrict != VINF_SUCCESS)
3415 return rcStrict;
3416 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3417 }
3418
3419 /*
3420 * Start committing the register changes (joins with the other branch).
3421 */
3422 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3423 }
3424
3425 /* ... register committing continues. */
3426 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3427 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3428 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3429 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3430 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3431 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3432
3433 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3434 fEfl &= ~fEflToClear;
3435 IEMMISC_SET_EFL(pVCpu, fEfl);
3436
3437 if (fFlags & IEM_XCPT_FLAGS_CR2)
3438 pVCpu->cpum.GstCtx.cr2 = uCr2;
3439
3440 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3441 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3442
3443 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3444}
3445
3446
3447/**
3448 * Implements exceptions and interrupts for long mode.
3449 *
3450 * @returns VBox strict status code.
3451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3452 * @param cbInstr The number of bytes to offset rIP by in the return
3453 * address.
3454 * @param u8Vector The interrupt / exception vector number.
3455 * @param fFlags The flags.
3456 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3457 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3458 */
3459static VBOXSTRICTRC
3460iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3461 uint8_t cbInstr,
3462 uint8_t u8Vector,
3463 uint32_t fFlags,
3464 uint16_t uErr,
3465 uint64_t uCr2) RT_NOEXCEPT
3466{
3467 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3468
3469 /*
3470 * Read the IDT entry.
3471 */
3472 uint16_t offIdt = (uint16_t)u8Vector << 4;
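    /* Long mode IDT entries are 16 bytes each, hence the shift by 4 and the
       two 8-byte system reads further down. */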
3473 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3474 {
3475 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3476 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3477 }
3478 X86DESC64 Idte;
3479#ifdef _MSC_VER /* Shut up silly compiler warning. */
3480 Idte.au64[0] = 0;
3481 Idte.au64[1] = 0;
3482#endif
3483 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3484 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3485 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3486 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3487 {
3488 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3489 return rcStrict;
3490 }
3491 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3492 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3493 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3494
3495 /*
3496 * Check the descriptor type, DPL and such.
3497 * ASSUMES this is done in the same order as described for call-gate calls.
3498 */
3499 if (Idte.Gate.u1DescType)
3500 {
3501 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3502 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3503 }
3504 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3505 switch (Idte.Gate.u4Type)
3506 {
3507 case AMD64_SEL_TYPE_SYS_INT_GATE:
3508 fEflToClear |= X86_EFL_IF;
3509 break;
3510 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3511 break;
3512
3513 default:
3514 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3515 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3516 }
3517
3518 /* Check DPL against CPL if applicable. */
3519 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3520 {
3521 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3522 {
3523 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3524 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3525 }
3526 }
3527
3528 /* Is it there? */
3529 if (!Idte.Gate.u1Present)
3530 {
3531 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3532 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3533 }
3534
3535 /* A null CS is bad. */
3536 RTSEL NewCS = Idte.Gate.u16Sel;
3537 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3538 {
3539 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3540 return iemRaiseGeneralProtectionFault0(pVCpu);
3541 }
3542
3543 /* Fetch the descriptor for the new CS. */
3544 IEMSELDESC DescCS;
3545 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3546 if (rcStrict != VINF_SUCCESS)
3547 {
3548 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3549 return rcStrict;
3550 }
3551
3552 /* Must be a 64-bit code segment. */
3553 if (!DescCS.Long.Gen.u1DescType)
3554 {
3555 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3556 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3557 }
3558 if ( !DescCS.Long.Gen.u1Long
3559 || DescCS.Long.Gen.u1DefBig
3560 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3561 {
3562 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3563 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3564 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3565 }
3566
3567 /* Don't allow lowering the privilege level. For non-conforming CS
3568 selectors, the CS.DPL sets the privilege level the trap/interrupt
3569 handler runs at. For conforming CS selectors, the CPL remains
3570 unchanged, but the CS.DPL must be <= CPL. */
3571 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3572 * when CPU in Ring-0. Result \#GP? */
3573 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3574 {
3575 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3576 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3577 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3578 }
3579
3580
3581 /* Make sure the selector is present. */
3582 if (!DescCS.Legacy.Gen.u1Present)
3583 {
3584 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3585 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3586 }
3587
3588 /* Check that the new RIP is canonical. */
3589 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3590 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3591 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3592 if (!IEM_IS_CANONICAL(uNewRip))
3593 {
3594 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3595 return iemRaiseGeneralProtectionFault0(pVCpu);
3596 }
3597
3598 /*
3599 * If the privilege level changes or if the IST isn't zero, we need to get
3600 * a new stack from the TSS.
3601 */
3602 uint64_t uNewRsp;
3603 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3604 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3605 if ( uNewCpl != pVCpu->iem.s.uCpl
3606 || Idte.Gate.u3IST != 0)
3607 {
3608 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3609 if (rcStrict != VINF_SUCCESS)
3610 return rcStrict;
3611 }
3612 else
3613 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3614 uNewRsp &= ~(uint64_t)0xf;
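    /* In 64-bit mode the stack frame is always aligned down to a 16-byte
       boundary before anything is pushed (see the Intel SDM description of
       the 64-bit mode stack frame). */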
3615
3616 /*
3617 * Calc the flag image to push.
3618 */
3619 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3620 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3621 fEfl &= ~X86_EFL_RF;
3622 else
3623 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3624
3625 /*
3626 * Start making changes.
3627 */
3628 /* Set the new CPL so that stack accesses use it. */
3629 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3630 pVCpu->iem.s.uCpl = uNewCpl;
3631
3632 /* Create the stack frame. */
3633 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
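    /* Five quadwords - RIP, CS, RFLAGS, RSP and SS - i.e. 40 bytes, or 48 when
       an error code is pushed as well. */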
3634 RTPTRUNION uStackFrame;
3635 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3636 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3637 if (rcStrict != VINF_SUCCESS)
3638 return rcStrict;
3639 void * const pvStackFrame = uStackFrame.pv;
3640
3641 if (fFlags & IEM_XCPT_FLAGS_ERR)
3642 *uStackFrame.pu64++ = uErr;
3643 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3644 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3645 uStackFrame.pu64[2] = fEfl;
3646 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3647 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3648 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3649 if (rcStrict != VINF_SUCCESS)
3650 return rcStrict;
3651
3652    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3653    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3654 * after pushing the stack frame? (Write protect the gdt + stack to
3655 * find out.) */
3656 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3657 {
3658 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3659 if (rcStrict != VINF_SUCCESS)
3660 return rcStrict;
3661 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3662 }
3663
3664 /*
3665     * Start committing the register changes.
3666 */
3667    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3668 * hidden registers when interrupting 32-bit or 16-bit code! */
3669 if (uNewCpl != uOldCpl)
3670 {
3671 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3672 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3673 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3674 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3675 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3676 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3677 }
3678 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3679 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3680 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3681 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3682 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3683 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3684 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3685 pVCpu->cpum.GstCtx.rip = uNewRip;
3686
3687 fEfl &= ~fEflToClear;
3688 IEMMISC_SET_EFL(pVCpu, fEfl);
3689
3690 if (fFlags & IEM_XCPT_FLAGS_CR2)
3691 pVCpu->cpum.GstCtx.cr2 = uCr2;
3692
3693 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3694 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3695
3696 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3697}
3698
3699
3700/**
3701 * Implements exceptions and interrupts.
3702 *
3703 * All exceptions and interrupts go through this function!
3704 *
3705 * @returns VBox strict status code.
3706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3707 * @param cbInstr The number of bytes to offset rIP by in the return
3708 * address.
3709 * @param u8Vector The interrupt / exception vector number.
3710 * @param fFlags The flags.
3711 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3712 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3713 */
3714VBOXSTRICTRC
3715iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3716 uint8_t cbInstr,
3717 uint8_t u8Vector,
3718 uint32_t fFlags,
3719 uint16_t uErr,
3720 uint64_t uCr2) RT_NOEXCEPT
3721{
3722 /*
3723 * Get all the state that we might need here.
3724 */
3725 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3726 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3727
3728#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3729 /*
3730 * Flush prefetch buffer
3731 */
3732 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3733#endif
3734
3735 /*
3736 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3737 */
3738 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3739 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3740 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3741 | IEM_XCPT_FLAGS_BP_INSTR
3742 | IEM_XCPT_FLAGS_ICEBP_INSTR
3743 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3744 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3745 {
3746 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3747 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3748 u8Vector = X86_XCPT_GP;
3749 uErr = 0;
3750 }
3751#ifdef DBGFTRACE_ENABLED
3752 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3753 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3754 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3755#endif
3756
3757 /*
3758 * Evaluate whether NMI blocking should be in effect.
3759 * Normally, NMI blocking is in effect whenever we inject an NMI.
3760 */
3761 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3762 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3763
3764#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3765 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3766 {
3767 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3768 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3769 return rcStrict0;
3770
3771 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3772 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3773 {
3774 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3775 fBlockNmi = false;
3776 }
3777 }
3778#endif
3779
3780#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3781 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3782 {
3783 /*
3784 * If the event is being injected as part of VMRUN, it isn't subject to event
3785 * intercepts in the nested-guest. However, secondary exceptions that occur
3786 * during injection of any event -are- subject to exception intercepts.
3787 *
3788 * See AMD spec. 15.20 "Event Injection".
3789 */
3790 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3791 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3792 else
3793 {
3794 /*
3795 * Check and handle if the event being raised is intercepted.
3796 */
3797 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3798 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3799 return rcStrict0;
3800 }
3801 }
3802#endif
3803
3804 /*
3805 * Set NMI blocking if necessary.
3806 */
3807 if (fBlockNmi)
3808 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3809
3810 /*
3811 * Do recursion accounting.
3812 */
3813 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3814 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3815 if (pVCpu->iem.s.cXcptRecursions == 0)
3816 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3817 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3818 else
3819 {
3820 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3821 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3822 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3823
3824 if (pVCpu->iem.s.cXcptRecursions >= 4)
3825 {
3826#ifdef DEBUG_bird
3827 AssertFailed();
3828#endif
3829 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3830 }
3831
3832 /*
3833 * Evaluate the sequence of recurring events.
3834 */
3835 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3836 NULL /* pXcptRaiseInfo */);
3837 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3838 { /* likely */ }
3839 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3840 {
3841 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3842 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3843 u8Vector = X86_XCPT_DF;
3844 uErr = 0;
3845#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3846 /* VMX nested-guest #DF intercept needs to be checked here. */
3847 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3848 {
3849 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3850 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3851 return rcStrict0;
3852 }
3853#endif
3854 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3855 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3856 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3857 }
3858 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3859 {
3860 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3861 return iemInitiateCpuShutdown(pVCpu);
3862 }
3863 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3864 {
3865 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3866 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3867 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3868 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3869 return VERR_EM_GUEST_CPU_HANG;
3870 }
3871 else
3872 {
3873 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3874 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3875 return VERR_IEM_IPE_9;
3876 }
3877
3878 /*
3879         * The 'EXT' bit is set when an exception occurs during delivery of an external
3880         * event (such as an interrupt or an earlier exception)[1]. The privileged software
3881         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3882         * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3883 *
3884 * [1] - Intel spec. 6.13 "Error Code"
3885 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3886 * [3] - Intel Instruction reference for INT n.
3887 */
3888 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3889 && (fFlags & IEM_XCPT_FLAGS_ERR)
3890 && u8Vector != X86_XCPT_PF
3891 && u8Vector != X86_XCPT_DF)
3892 {
3893 uErr |= X86_TRAP_ERR_EXTERNAL;
3894 }
3895 }
3896
3897 pVCpu->iem.s.cXcptRecursions++;
3898 pVCpu->iem.s.uCurXcpt = u8Vector;
3899 pVCpu->iem.s.fCurXcpt = fFlags;
3900 pVCpu->iem.s.uCurXcptErr = uErr;
3901 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3902
3903 /*
3904 * Extensive logging.
3905 */
3906#if defined(LOG_ENABLED) && defined(IN_RING3)
3907 if (LogIs3Enabled())
3908 {
3909 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3910 PVM pVM = pVCpu->CTX_SUFF(pVM);
3911 char szRegs[4096];
3912 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3913 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3914 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3915 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3916 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3917 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3918 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3919 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3920 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3921 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3922 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3923 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3924 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3925 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3926 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3927 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3928 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3929 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3930 " efer=%016VR{efer}\n"
3931 " pat=%016VR{pat}\n"
3932 " sf_mask=%016VR{sf_mask}\n"
3933 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3934 " lstar=%016VR{lstar}\n"
3935 " star=%016VR{star} cstar=%016VR{cstar}\n"
3936 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3937 );
3938
3939 char szInstr[256];
3940 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3941 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3942 szInstr, sizeof(szInstr), NULL);
3943 Log3(("%s%s\n", szRegs, szInstr));
3944 }
3945#endif /* LOG_ENABLED */
3946
3947 /*
3948 * Stats.
3949 */
3950 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3951 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
3952 else if (u8Vector <= X86_XCPT_LAST)
3953 {
3954 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
3955 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
3956 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
3957 }
3958
3959 /*
3960     * A #PF implies an INVLPG of the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
3961 * to ensure that a stale TLB or paging cache entry will only cause one
3962 * spurious #PF.
3963 */
3964 if ( u8Vector == X86_XCPT_PF
3965 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
3966 IEMTlbInvalidatePage(pVCpu, uCr2);
3967
3968 /*
3969 * Call the mode specific worker function.
3970 */
3971 VBOXSTRICTRC rcStrict;
3972 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3973 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3974 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3975 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3976 else
3977 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3978
3979 /* Flush the prefetch buffer. */
3980#ifdef IEM_WITH_CODE_TLB
3981 pVCpu->iem.s.pbInstrBuf = NULL;
3982#else
3983 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3984#endif
3985
3986 /*
3987 * Unwind.
3988 */
3989 pVCpu->iem.s.cXcptRecursions--;
3990 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3991 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3992 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3993 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3994 pVCpu->iem.s.cXcptRecursions + 1));
3995 return rcStrict;
3996}
3997
3998#ifdef IEM_WITH_SETJMP
3999/**
4000 * See iemRaiseXcptOrInt. Will not return.
4001 */
4002DECL_NO_RETURN(void)
4003iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4004 uint8_t cbInstr,
4005 uint8_t u8Vector,
4006 uint32_t fFlags,
4007 uint16_t uErr,
4008 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4009{
4010 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4011 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4012}
4013#endif
4014
4015
4016/** \#DE - 00. */
4017VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4018{
4019 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4020}
4021
4022
4023/** \#DB - 01.
4024 * @note This automatically clears DR7.GD. */
4025VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4026{
4027 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4028 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4029 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4030}
4031
4032
4033/** \#BR - 05. */
4034VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4035{
4036 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4037}
4038
4039
4040/** \#UD - 06. */
4041VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4042{
4043 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4044}
4045
4046
4047/** \#NM - 07. */
4048VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4049{
4050 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4051}
4052
4053
4054/** \#TS(err) - 0a. */
4055VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4056{
4057 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4058}
4059
4060
4061/** \#TS(tr) - 0a. */
4062VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4063{
4064 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4065 pVCpu->cpum.GstCtx.tr.Sel, 0);
4066}
4067
4068
4069/** \#TS(0) - 0a. */
4070VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4071{
4072 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4073 0, 0);
4074}
4075
4076
4077/** \#TS(sel) - 0a. */
4078VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4079{
4080 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4081 uSel & X86_SEL_MASK_OFF_RPL, 0);
4082}
4083
4084
4085/** \#NP(err) - 0b. */
4086VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4087{
4088 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4089}
4090
4091
4092/** \#NP(sel) - 0b. */
4093VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4094{
4095 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4096 uSel & ~X86_SEL_RPL, 0);
4097}
4098
4099
4100/** \#SS(sel) - 0c. */
4101VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4102{
4103 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4104 uSel & ~X86_SEL_RPL, 0);
4105}
4106
4107
4108/** \#SS(err) - 0c. */
4109VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4110{
4111 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4112}
4113
4114
4115/** \#GP(n) - 0d. */
4116VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4117{
4118 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4119}
4120
4121
4122/** \#GP(0) - 0d. */
4123VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4124{
4125 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4126}
4127
4128#ifdef IEM_WITH_SETJMP
4129/** \#GP(0) - 0d. */
4130DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4131{
4132 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4133}
4134#endif
4135
4136
4137/** \#GP(sel) - 0d. */
4138VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4139{
4140 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4141 Sel & ~X86_SEL_RPL, 0);
4142}
4143
4144
4145/** \#GP(0) - 0d. */
4146VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4147{
4148 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4149}
4150
4151
4152/** \#GP(sel) - 0d. */
4153VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4154{
4155 NOREF(iSegReg); NOREF(fAccess);
4156 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4157 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4158}
4159
4160#ifdef IEM_WITH_SETJMP
4161/** \#GP(sel) - 0d, longjmp. */
4162DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4163{
4164 NOREF(iSegReg); NOREF(fAccess);
4165 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4166 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4167}
4168#endif
4169
4170/** \#GP(sel) - 0d. */
4171VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4172{
4173 NOREF(Sel);
4174 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4175}
4176
4177#ifdef IEM_WITH_SETJMP
4178/** \#GP(sel) - 0d, longjmp. */
4179DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4180{
4181 NOREF(Sel);
4182 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4183}
4184#endif
4185
4186
4187/** \#GP(sel) - 0d. */
4188VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4189{
4190 NOREF(iSegReg); NOREF(fAccess);
4191 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4192}
4193
4194#ifdef IEM_WITH_SETJMP
4195/** \#GP(sel) - 0d, longjmp. */
4196DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4197{
4198 NOREF(iSegReg); NOREF(fAccess);
4199 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4200}
4201#endif
4202
4203
4204/** \#PF(n) - 0e. */
4205VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4206{
4207 uint16_t uErr;
4208 switch (rc)
4209 {
4210 case VERR_PAGE_NOT_PRESENT:
4211 case VERR_PAGE_TABLE_NOT_PRESENT:
4212 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4213 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4214 uErr = 0;
4215 break;
4216
4217 default:
4218 AssertMsgFailed(("%Rrc\n", rc));
4219 RT_FALL_THRU();
4220 case VERR_ACCESS_DENIED:
4221 uErr = X86_TRAP_PF_P;
4222 break;
4223
4224 /** @todo reserved */
4225 }
4226
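    /* The remaining #PF error code bits are ORed in below:
         X86_TRAP_PF_US - the access originated at CPL 3;
         X86_TRAP_PF_ID - an instruction fetch with PAE paging and EFER.NXE set;
         X86_TRAP_PF_RW - a write (or read-modify-write) access. */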
4227 if (pVCpu->iem.s.uCpl == 3)
4228 uErr |= X86_TRAP_PF_US;
4229
4230 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4231 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4232 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4233 uErr |= X86_TRAP_PF_ID;
4234
4235#if 0 /* This is so much non-sense, really. Why was it done like that? */
4236 /* Note! RW access callers reporting a WRITE protection fault, will clear
4237 the READ flag before calling. So, read-modify-write accesses (RW)
4238 can safely be reported as READ faults. */
4239 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4240 uErr |= X86_TRAP_PF_RW;
4241#else
4242 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4243 {
4244 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4245 /// (regardless of outcome of the comparison in the latter case).
4246 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4247 uErr |= X86_TRAP_PF_RW;
4248 }
4249#endif
4250
4251 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4252 of the memory operand rather than at the start of it. (Not sure what
4253       happens if it crosses a page boundary.)  The current heuristic for
4254       this is to report the #PF for the last byte if the access is more than
4255       64 bytes.  This is probably not correct, but we can work that out later;
4256       the main objective now is to get FXSAVE to work like on real hardware and
4257 make bs3-cpu-basic2 work. */
4258 if (cbAccess <= 64)
4259 { /* likely*/ }
4260 else
4261 GCPtrWhere += cbAccess - 1;
4262
4263 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4264 uErr, GCPtrWhere);
4265}
4266
4267#ifdef IEM_WITH_SETJMP
4268/** \#PF(n) - 0e, longjmp. */
4269DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4270 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4271{
4272 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4273}
4274#endif
4275
4276
4277/** \#MF(0) - 10. */
4278VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4279{
4280 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4281 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4282
4283 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4284 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4285 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4286}
4287
4288
4289/** \#AC(0) - 11. */
4290VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4291{
4292 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4293}
4294
4295#ifdef IEM_WITH_SETJMP
4296/** \#AC(0) - 11, longjmp. */
4297DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4298{
4299 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4300}
4301#endif
4302
4303
4304/** \#XF(0)/\#XM(0) - 19. */
4305VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4306{
4307 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4308}
4309
4310
4311/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4312IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4313{
4314 NOREF(cbInstr);
4315 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4316}
4317
4318
4319/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4320IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4321{
4322 NOREF(cbInstr);
4323 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4324}
4325
4326
4327/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4328IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4329{
4330 NOREF(cbInstr);
4331 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4332}
4333
4334
4335/** @} */
4336
4337/** @name Common opcode decoders.
4338 * @{
4339 */
4340//#include <iprt/mem.h>
4341
4342/**
4343 * Used to add extra details about a stub case.
4344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4345 */
4346void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4347{
4348#if defined(LOG_ENABLED) && defined(IN_RING3)
4349 PVM pVM = pVCpu->CTX_SUFF(pVM);
4350 char szRegs[4096];
4351 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4352 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4353 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4354 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4355 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4356 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4357 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4358 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4359 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4360 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4361 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4362 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4363 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4364 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4365 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4366 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4367 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4368 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4369 " efer=%016VR{efer}\n"
4370 " pat=%016VR{pat}\n"
4371 " sf_mask=%016VR{sf_mask}\n"
4372 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4373 " lstar=%016VR{lstar}\n"
4374 " star=%016VR{star} cstar=%016VR{cstar}\n"
4375 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4376 );
4377
4378 char szInstr[256];
4379 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4380 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4381 szInstr, sizeof(szInstr), NULL);
4382
4383 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4384#else
4385    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4386#endif
4387}
4388
4389/** @} */
4390
4391
4392
4393/** @name Register Access.
4394 * @{
4395 */
4396
4397/**
4398 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4399 *
4400 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4401 * segment limit.
4402 *
4403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4404 * @param cbInstr Instruction size.
4405 * @param offNextInstr The offset of the next instruction.
4406 * @param enmEffOpSize Effective operand size.
4407 */
4408VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4409 IEMMODE enmEffOpSize) RT_NOEXCEPT
4410{
4411 switch (enmEffOpSize)
4412 {
4413 case IEMMODE_16BIT:
4414 {
4415 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4416 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4417 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no CS limit checks in 64-bit mode */))
4418 pVCpu->cpum.GstCtx.rip = uNewIp;
4419 else
4420 return iemRaiseGeneralProtectionFault0(pVCpu);
4421 break;
4422 }
4423
4424 case IEMMODE_32BIT:
4425 {
4426 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4427 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4428
4429 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4430 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4431 pVCpu->cpum.GstCtx.rip = uNewEip;
4432 else
4433 return iemRaiseGeneralProtectionFault0(pVCpu);
4434 break;
4435 }
4436
4437 case IEMMODE_64BIT:
4438 {
4439 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4440
4441 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4442 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4443 pVCpu->cpum.GstCtx.rip = uNewRip;
4444 else
4445 return iemRaiseGeneralProtectionFault0(pVCpu);
4446 break;
4447 }
4448
4449 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4450 }
4451
4452#ifndef IEM_WITH_CODE_TLB
4453 /* Flush the prefetch buffer. */
4454 pVCpu->iem.s.cbOpcode = cbInstr;
4455#endif
4456
4457 /*
4458 * Clear RF and finish the instruction (maybe raise #DB).
4459 */
4460 return iemRegFinishClearingRF(pVCpu);
4461}
4462
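/*
 * Illustrative sketch (not compiled, name made up): the 16-bit case above does
 * the addition in uint16_t width, so the result wraps at 64 KiB before the CS
 * limit check is applied.
 */
#if 0
static uint16_t iemExampleCalcNewIp(uint16_t uOldIp, uint8_t cbInstr, int8_t offNextInstr)
{
    /* E.g. 0xfff0 + 0x10 + 0x20 truncates to 0x0020 rather than 0x10020. */
    return (uint16_t)(uOldIp + cbInstr + (int16_t)offNextInstr);
}
#endif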
4463
4464/**
4465 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4466 *
4467 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4468 * segment limit.
4469 *
4470 * @returns Strict VBox status code.
4471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4472 * @param cbInstr Instruction size.
4473 * @param offNextInstr The offset of the next instruction.
4474 */
4475VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4476{
4477 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4478
4479 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4480 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4481 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no limit checking in 64-bit mode */))
4482 pVCpu->cpum.GstCtx.rip = uNewIp;
4483 else
4484 return iemRaiseGeneralProtectionFault0(pVCpu);
4485
4486#ifndef IEM_WITH_CODE_TLB
4487 /* Flush the prefetch buffer. */
4488 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4489#endif
4490
4491 /*
4492 * Clear RF and finish the instruction (maybe raise #DB).
4493 */
4494 return iemRegFinishClearingRF(pVCpu);
4495}
4496
4497
4498/**
4499 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4500 *
4501 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4502 * segment limit.
4503 *
4504 * @returns Strict VBox status code.
4505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4506 * @param cbInstr Instruction size.
4507 * @param offNextInstr The offset of the next instruction.
4508 * @param enmEffOpSize Effective operand size.
4509 */
4510VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4511 IEMMODE enmEffOpSize) RT_NOEXCEPT
4512{
4513 if (enmEffOpSize == IEMMODE_32BIT)
4514 {
4515 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4516
4517 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4518 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4519 pVCpu->cpum.GstCtx.rip = uNewEip;
4520 else
4521 return iemRaiseGeneralProtectionFault0(pVCpu);
4522 }
4523 else
4524 {
4525 Assert(enmEffOpSize == IEMMODE_64BIT);
4526
4527 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4528 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4529 pVCpu->cpum.GstCtx.rip = uNewRip;
4530 else
4531 return iemRaiseGeneralProtectionFault0(pVCpu);
4532 }
4533
4534#ifndef IEM_WITH_CODE_TLB
4535 /* Flush the prefetch buffer. */
4536 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4537#endif
4538
4539 /*
4540 * Clear RF and finish the instruction (maybe raise #DB).
4541 */
4542 return iemRegFinishClearingRF(pVCpu);
4543}
4544
4545
4546/**
4547 * Performs a near jump to the specified address.
4548 *
4549 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4550 *
4551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4552 * @param uNewIp The new IP value.
4553 */
4554VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4555{
4556 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4557 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no limit checks in 64-bit mode */))
4558 pVCpu->cpum.GstCtx.rip = uNewIp;
4559 else
4560 return iemRaiseGeneralProtectionFault0(pVCpu);
4561 /** @todo Test 16-bit jump in 64-bit mode. */
4562
4563#ifndef IEM_WITH_CODE_TLB
4564 /* Flush the prefetch buffer. */
4565 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4566#endif
4567
4568 /*
4569 * Clear RF and finish the instruction (maybe raise #DB).
4570 */
4571 return iemRegFinishClearingRF(pVCpu);
4572}
4573
4574
4575/**
4576 * Performs a near jump to the specified address.
4577 *
4578 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4579 *
4580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4581 * @param uNewEip The new EIP value.
4582 */
4583VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4584{
4585 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4586 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4587
4588 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4589 pVCpu->cpum.GstCtx.rip = uNewEip;
4590 else
4591 return iemRaiseGeneralProtectionFault0(pVCpu);
4592
4593#ifndef IEM_WITH_CODE_TLB
4594 /* Flush the prefetch buffer. */
4595 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4596#endif
4597
4598 /*
4599 * Clear RF and finish the instruction (maybe raise #DB).
4600 */
4601 return iemRegFinishClearingRF(pVCpu);
4602}
4603
4604
4605/**
4606 * Performs a near jump to the specified address.
4607 *
4608 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4609 * segment limit.
4610 *
4611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4612 * @param uNewRip The new RIP value.
4613 */
4614VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4615{
4616 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4617
4618 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4619 pVCpu->cpum.GstCtx.rip = uNewRip;
4620 else
4621 return iemRaiseGeneralProtectionFault0(pVCpu);
4622
4623#ifndef IEM_WITH_CODE_TLB
4624 /* Flush the prefetch buffer. */
4625 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4626#endif
4627
4628 /*
4629 * Clear RF and finish the instruction (maybe raise #DB).
4630 */
4631 return iemRegFinishClearingRF(pVCpu);
4632}
4633
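/*
 * Illustrative sketch (not compiled, name made up): the IEM_IS_CANONICAL()
 * check used by the 64-bit paths above accepts sign-extended 48-bit addresses,
 * i.e. bits 63:47 must all be equal (assuming the usual 48-bit virtual address
 * width, not LA57).
 */
#if 0
static bool iemExampleIsCanonical(uint64_t uAddr)
{
    /* 0x00007fffffffffff and 0xffff800000000000 pass; 0x0000800000000000 does not. */
    return (uint64_t)((int64_t)uAddr >> 47) + 1 <= 1;
}
#endif
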
4634/** @} */
4635
4636
4637/** @name FPU access and helpers.
4638 *
4639 * @{
4640 */
4641
4642/**
4643 * Updates the x87.DS and FPUDP registers.
4644 *
4645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4646 * @param pFpuCtx The FPU context.
4647 * @param iEffSeg The effective segment register.
4648 * @param GCPtrEff The effective address relative to @a iEffSeg.
4649 */
4650DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4651{
4652 RTSEL sel;
4653 switch (iEffSeg)
4654 {
4655 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4656 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4657 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4658 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4659 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4660 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4661 default:
4662 AssertMsgFailed(("%d\n", iEffSeg));
4663 sel = pVCpu->cpum.GstCtx.ds.Sel;
4664 }
4665    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4666 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4667 {
4668 pFpuCtx->DS = 0;
4669 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4670 }
4671 else if (!IEM_IS_LONG_MODE(pVCpu))
4672 {
4673 pFpuCtx->DS = sel;
4674 pFpuCtx->FPUDP = GCPtrEff;
4675 }
4676 else
4677 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4678}
4679
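/*
 * Illustrative sketch (not compiled, name made up): in real and V86 mode the
 * data pointer stored above is the linear address, i.e. selector * 16 plus the
 * effective offset.
 */
#if 0
static uint32_t iemExampleRealModeFpuDp(uint16_t sel, uint32_t offEff)
{
    /* E.g. sel=0x2000, offEff=0x0123 -> FPUDP=0x20123. */
    return offEff + ((uint32_t)sel << 4);
}
#endif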
4680
4681/**
4682 * Rotates the stack registers in the push direction.
4683 *
4684 * @param pFpuCtx The FPU context.
4685 * @remarks This is a complete waste of time, but fxsave stores the registers in
4686 * stack order.
4687 */
4688DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4689{
4690 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4691 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4692 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4693 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4694 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4695 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4696 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4697 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4698 pFpuCtx->aRegs[0].r80 = r80Tmp;
4699}
4700
4701
4702/**
4703 * Rotates the stack registers in the pop direction.
4704 *
4705 * @param pFpuCtx The FPU context.
4706 * @remarks This is a complete waste of time, but fxsave stores the registers in
4707 * stack order.
4708 */
4709DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4710{
4711 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4712 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4713 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4714 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4715 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4716 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4717 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4718 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4719 pFpuCtx->aRegs[7].r80 = r80Tmp;
4720}
4721
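/*
 * Illustrative sketch (not compiled, name made up): aRegs[] is kept in ST(i)
 * order, which is why TOP changes are accompanied by the rotations above.  The
 * mapping from a stack index to the physical x87 register number is:
 */
#if 0
static unsigned iemExamplePhysRegFromStackIdx(uint16_t fFsw, unsigned iStReg)
{
    /* E.g. TOP=6 and ST(1) -> physical register 7. */
    return (X86_FSW_TOP_GET(fFsw) + iStReg) & X86_FSW_TOP_SMASK;
}
#endif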
4722
4723/**
4724 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4725 * exception prevents it.
4726 *
4727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4728 * @param pResult The FPU operation result to push.
4729 * @param pFpuCtx The FPU context.
4730 */
4731static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4732{
4733 /* Update FSW and bail if there are pending exceptions afterwards. */
4734 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4735 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4736 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4737 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4738 {
4739        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4740 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4741 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4742 pFpuCtx->FSW = fFsw;
4743 return;
4744 }
4745
4746 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4747 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4748 {
4749 /* All is fine, push the actual value. */
4750 pFpuCtx->FTW |= RT_BIT(iNewTop);
4751 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4752 }
4753 else if (pFpuCtx->FCW & X86_FCW_IM)
4754 {
4755 /* Masked stack overflow, push QNaN. */
4756 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4757 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4758 }
4759 else
4760 {
4761 /* Raise stack overflow, don't push anything. */
4762 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4763 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4764 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4765 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4766 return;
4767 }
4768
4769 fFsw &= ~X86_FSW_TOP_MASK;
4770 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4771 pFpuCtx->FSW = fFsw;
4772
4773 iemFpuRotateStackPush(pFpuCtx);
4774 RT_NOREF(pVCpu);
4775}
4776
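/*
 * Illustrative sketch (not compiled, name made up): the "+ 7" above is simply
 * TOP - 1 modulo 8, i.e. the push direction of the 3-bit TOP field.
 */
#if 0
static uint16_t iemExampleTopAfterPush(uint16_t fFsw)
{
    /* E.g. TOP=0 -> 7, TOP=3 -> 2. */
    return (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
}
#endif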
4777
4778/**
4779 * Stores a result in a FPU register and updates the FSW and FTW.
4780 *
4781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4782 * @param pFpuCtx The FPU context.
4783 * @param pResult The result to store.
4784 * @param iStReg Which FPU register to store it in.
4785 */
4786static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4787{
4788 Assert(iStReg < 8);
4789 uint16_t fNewFsw = pFpuCtx->FSW;
4790 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4791 fNewFsw &= ~X86_FSW_C_MASK;
4792 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4793 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4794 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4795 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4796 pFpuCtx->FSW = fNewFsw;
4797 pFpuCtx->FTW |= RT_BIT(iReg);
4798 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4799 RT_NOREF(pVCpu);
4800}
4801
4802
4803/**
4804 * Only updates the FPU status word (FSW) with the result of the current
4805 * instruction.
4806 *
4807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4808 * @param pFpuCtx The FPU context.
4809 * @param u16FSW The FSW output of the current instruction.
4810 */
4811static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4812{
4813 uint16_t fNewFsw = pFpuCtx->FSW;
4814 fNewFsw &= ~X86_FSW_C_MASK;
4815 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4816 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4817        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4818 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4819 pFpuCtx->FSW = fNewFsw;
4820 RT_NOREF(pVCpu);
4821}
4822
4823
4824/**
4825 * Pops one item off the FPU stack if no pending exception prevents it.
4826 *
4827 * @param pFpuCtx The FPU context.
4828 */
4829static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4830{
4831 /* Check pending exceptions. */
4832 uint16_t uFSW = pFpuCtx->FSW;
4833 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4834 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4835 return;
4836
4837 /* TOP--. */
4838 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4839 uFSW &= ~X86_FSW_TOP_MASK;
4840 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4841 pFpuCtx->FSW = uFSW;
4842
4843 /* Mark the previous ST0 as empty. */
4844 iOldTop >>= X86_FSW_TOP_SHIFT;
4845 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4846
4847 /* Rotate the registers. */
4848 iemFpuRotateStackPop(pFpuCtx);
4849}
4850
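/*
 * Illustrative sketch (not compiled, name made up): adding 9 << X86_FSW_TOP_SHIFT
 * and masking with X86_FSW_TOP_MASK above increments the 3-bit TOP field by one
 * (the extra 8 falls outside the mask), i.e. the pop direction.
 */
#if 0
static uint16_t iemExampleTopAfterPop(uint16_t fFsw)
{
    /* E.g. TOP=7 -> 0, TOP=2 -> 3. */
    return (X86_FSW_TOP_GET(fFsw) + 1) & X86_FSW_TOP_SMASK;
}
#endif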
4851
4852/**
4853 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4854 *
4855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4856 * @param pResult The FPU operation result to push.
4857 */
4858void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4859{
4860 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4861 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4862 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4863}
4864
4865
4866/**
4867 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4868 * and sets FPUDP and FPUDS.
4869 *
4870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4871 * @param pResult The FPU operation result to push.
4872 * @param iEffSeg The effective segment register.
4873 * @param GCPtrEff The effective address relative to @a iEffSeg.
4874 */
4875void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4876{
4877 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4878 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4879 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4880 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4881}
4882
4883
4884/**
4885 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4886 * unless a pending exception prevents it.
4887 *
4888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4889 * @param pResult The FPU operation result to store and push.
4890 */
4891void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4892{
4893 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4894 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4895
4896 /* Update FSW and bail if there are pending exceptions afterwards. */
4897 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4898 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4899 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4900 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4901 {
4902 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4903 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4904 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4905 pFpuCtx->FSW = fFsw;
4906 return;
4907 }
4908
4909 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4910 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4911 {
4912 /* All is fine, push the actual value. */
4913 pFpuCtx->FTW |= RT_BIT(iNewTop);
4914 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4915 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4916 }
4917 else if (pFpuCtx->FCW & X86_FCW_IM)
4918 {
4919 /* Masked stack overflow, push QNaN. */
4920 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4921 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4922 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4923 }
4924 else
4925 {
4926 /* Raise stack overflow, don't push anything. */
4927 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4928 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4929 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4930 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4931 return;
4932 }
4933
4934 fFsw &= ~X86_FSW_TOP_MASK;
4935 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4936 pFpuCtx->FSW = fFsw;
4937
4938 iemFpuRotateStackPush(pFpuCtx);
4939}
4940
4941
4942/**
4943 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4944 * FOP.
4945 *
4946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4947 * @param pResult The result to store.
4948 * @param iStReg Which FPU register to store it in.
4949 */
4950void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4951{
4952 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4953 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4954 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4955}
4956
4957
4958/**
4959 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4960 * FOP, and then pops the stack.
4961 *
4962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4963 * @param pResult The result to store.
4964 * @param iStReg Which FPU register to store it in.
4965 */
4966void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4967{
4968 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4969 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4970 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4971 iemFpuMaybePopOne(pFpuCtx);
4972}
4973
4974
4975/**
4976 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4977 * FPUDP, and FPUDS.
4978 *
4979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4980 * @param pResult The result to store.
4981 * @param iStReg Which FPU register to store it in.
4982 * @param iEffSeg The effective memory operand selector register.
4983 * @param GCPtrEff The effective memory operand offset.
4984 */
4985void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4986 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4987{
4988 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4989 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4990 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4991 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4992}
4993
4994
4995/**
4996 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4997 * FPUDP, and FPUDS, and then pops the stack.
4998 *
4999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5000 * @param pResult The result to store.
5001 * @param iStReg Which FPU register to store it in.
5002 * @param iEffSeg The effective memory operand selector register.
5003 * @param GCPtrEff The effective memory operand offset.
5004 */
5005void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5006 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5007{
5008 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5009 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5010 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5011 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5012 iemFpuMaybePopOne(pFpuCtx);
5013}
5014
5015
5016/**
5017 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5018 *
5019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5020 */
5021void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
5022{
5023 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5024 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5025}
5026
5027
5028/**
5029 * Updates the FSW, FOP, FPUIP, and FPUCS.
5030 *
5031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5032 * @param u16FSW The FSW from the current instruction.
5033 */
5034void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5035{
5036 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5037 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5038 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5039}
5040
5041
5042/**
5043 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5044 *
5045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5046 * @param u16FSW The FSW from the current instruction.
5047 */
5048void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5049{
5050 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5051 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5052 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5053 iemFpuMaybePopOne(pFpuCtx);
5054}
5055
5056
5057/**
5058 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5059 *
5060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5061 * @param u16FSW The FSW from the current instruction.
5062 * @param iEffSeg The effective memory operand selector register.
5063 * @param GCPtrEff The effective memory operand offset.
5064 */
5065void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5066{
5067 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5068 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5069 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5070 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5071}
5072
5073
5074/**
5075 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5076 *
5077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5078 * @param u16FSW The FSW from the current instruction.
5079 */
5080void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5081{
5082 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5083 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5084 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5085 iemFpuMaybePopOne(pFpuCtx);
5086 iemFpuMaybePopOne(pFpuCtx);
5087}
5088
5089
5090/**
5091 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5092 *
5093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5094 * @param u16FSW The FSW from the current instruction.
5095 * @param iEffSeg The effective memory operand selector register.
5096 * @param GCPtrEff The effective memory operand offset.
5097 */
5098void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5099{
5100 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5101 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5102 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5103 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5104 iemFpuMaybePopOne(pFpuCtx);
5105}
5106
5107
5108/**
5109 * Worker routine for raising an FPU stack underflow exception.
5110 *
5111 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5112 * @param pFpuCtx The FPU context.
5113 * @param iStReg The stack register being accessed.
5114 */
5115static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5116{
5117 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5118 if (pFpuCtx->FCW & X86_FCW_IM)
5119 {
5120 /* Masked underflow. */
5121 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5122 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5123 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5124 if (iStReg != UINT8_MAX)
5125 {
5126 pFpuCtx->FTW |= RT_BIT(iReg);
5127 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5128 }
5129 }
5130 else
5131 {
5132 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5133 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5134 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5135 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5136 }
5137 RT_NOREF(pVCpu);
5138}
5139
5140
5141/**
5142 * Raises a FPU stack underflow exception.
5143 *
5144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5145 * @param   iStReg              The destination register that should be loaded
5146 *                              with QNaN if \#IS is masked. Specify
5147 *                              UINT8_MAX if none (like for fcom).
5148 */
5149void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5150{
5151 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5152 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5153 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5154}
5155
5156
5157void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5158{
5159 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5160 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5161 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5162 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5163}
5164
5165
5166void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5167{
5168 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5169 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5170 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5171 iemFpuMaybePopOne(pFpuCtx);
5172}
5173
5174
5175void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5176{
5177 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5178 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5179 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5180 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5181 iemFpuMaybePopOne(pFpuCtx);
5182}
5183
5184
5185void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5186{
5187 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5188 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5189 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5190 iemFpuMaybePopOne(pFpuCtx);
5191 iemFpuMaybePopOne(pFpuCtx);
5192}
5193
5194
5195void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5196{
5197 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5198 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5199
5200 if (pFpuCtx->FCW & X86_FCW_IM)
5201 {
5202        /* Masked underflow - Push QNaN. */
5203 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5204 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5205 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5206 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5207 pFpuCtx->FTW |= RT_BIT(iNewTop);
5208 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5209 iemFpuRotateStackPush(pFpuCtx);
5210 }
5211 else
5212 {
5213 /* Exception pending - don't change TOP or the register stack. */
5214 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5215 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5216 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5217 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5218 }
5219}
5220
5221
5222void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5223{
5224 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5225 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5226
5227 if (pFpuCtx->FCW & X86_FCW_IM)
5228 {
5229        /* Masked underflow - Push QNaN. */
5230 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5231 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5232 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5233 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5234 pFpuCtx->FTW |= RT_BIT(iNewTop);
5235 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5236 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5237 iemFpuRotateStackPush(pFpuCtx);
5238 }
5239 else
5240 {
5241 /* Exception pending - don't change TOP or the register stack. */
5242 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5243 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5244 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5245 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5246 }
5247}
5248
5249
5250/**
5251 * Worker routine for raising an FPU stack overflow exception on a push.
5252 *
5253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5254 * @param pFpuCtx The FPU context.
5255 */
5256static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5257{
5258 if (pFpuCtx->FCW & X86_FCW_IM)
5259 {
5260 /* Masked overflow. */
5261 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5262 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5263 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5264 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5265 pFpuCtx->FTW |= RT_BIT(iNewTop);
5266 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5267 iemFpuRotateStackPush(pFpuCtx);
5268 }
5269 else
5270 {
5271 /* Exception pending - don't change TOP or the register stack. */
5272 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5273 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5274 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5275 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5276 }
5277 RT_NOREF(pVCpu);
5278}
5279
5280
5281/**
5282 * Raises a FPU stack overflow exception on a push.
5283 *
5284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5285 */
5286void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5287{
5288 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5289 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5290 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5291}
5292
5293
5294/**
5295 * Raises a FPU stack overflow exception on a push with a memory operand.
5296 *
5297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5298 * @param iEffSeg The effective memory operand selector register.
5299 * @param GCPtrEff The effective memory operand offset.
5300 */
5301void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5302{
5303 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5304 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5305 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5306 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5307}
5308
5309/** @} */
5310
5311
5312/** @name SSE+AVX SIMD access and helpers.
5313 *
5314 * @{
5315 */
5316/**
5317 * Stores a result in a SIMD XMM register, updates the MXCSR.
5318 *
5319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5320 * @param pResult The result to store.
5321 * @param iXmmReg Which SIMD XMM register to store the result in.
5322 */
5323void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5324{
5325 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5326 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5327
5328 /* The result is only updated if there is no unmasked exception pending. */
5329 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5330 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5331 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5332}
5333
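/*
 * Illustrative sketch (not compiled, name made up): the condition above lines
 * the six MXCSR exception flag bits up with their mask bits; a non-zero result
 * means at least one signalled exception is unmasked and the destination
 * register is left untouched.
 */
#if 0
static bool iemExampleSseHasUnmaskedXcpt(uint32_t fMxcsr)
{
    uint32_t const fXcpts = fMxcsr & X86_MXCSR_XCPT_FLAGS;                               /* IE..PE status flags. */
    uint32_t const fMasks = (fMxcsr & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT; /* IM..PM mask bits.    */
    return (fXcpts & ~fMasks) != 0;
}
#endif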
5334
5335/**
5336 * Updates the MXCSR.
5337 *
5338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5339 * @param fMxcsr The new MXCSR value.
5340 */
5341void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5342{
5343 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5344 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5345}
5346/** @} */
5347
5348
5349/** @name Memory access.
5350 *
5351 * @{
5352 */
5353
5354
5355/**
5356 * Updates the IEMCPU::cbWritten counter if applicable.
5357 *
5358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5359 * @param fAccess The access being accounted for.
5360 * @param cbMem The access size.
5361 */
5362DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5363{
5364 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5365 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5366 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5367}
5368
5369
5370/**
5371 * Applies the segment limit, base and attributes.
5372 *
5373 * This may raise a \#GP or \#SS.
5374 *
5375 * @returns VBox strict status code.
5376 *
5377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5378 * @param fAccess The kind of access which is being performed.
5379 * @param iSegReg The index of the segment register to apply.
5380 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5381 * TSS, ++).
5382 * @param cbMem The access size.
5383 * @param pGCPtrMem Pointer to the guest memory address to apply
5384 * segmentation to. Input and output parameter.
5385 */
5386VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5387{
5388 if (iSegReg == UINT8_MAX)
5389 return VINF_SUCCESS;
5390
5391 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5392 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5393 switch (pVCpu->iem.s.enmCpuMode)
5394 {
5395 case IEMMODE_16BIT:
5396 case IEMMODE_32BIT:
5397 {
5398 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5399 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5400
5401 if ( pSel->Attr.n.u1Present
5402 && !pSel->Attr.n.u1Unusable)
5403 {
5404 Assert(pSel->Attr.n.u1DescType);
5405 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5406 {
5407 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5408 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5409 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5410
5411 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5412 {
5413 /** @todo CPL check. */
5414 }
5415
5416 /*
5417 * There are two kinds of data selectors, normal and expand down.
5418 */
5419 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5420 {
5421 if ( GCPtrFirst32 > pSel->u32Limit
5422 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5423 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5424 }
5425 else
5426 {
5427 /*
5428 * The upper boundary is defined by the B bit, not the G bit!
5429 */
5430 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5431 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5432 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5433 }
5434 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5435 }
5436 else
5437 {
5438 /*
5439                     * A code selector can usually be used to read through it; writing is
5440                     * only permitted in real and V8086 mode.
5441 */
5442 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5443 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5444 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5445 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5446 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5447
5448 if ( GCPtrFirst32 > pSel->u32Limit
5449 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5450 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5451
5452 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5453 {
5454 /** @todo CPL check. */
5455 }
5456
5457 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5458 }
5459 }
5460 else
5461 return iemRaiseGeneralProtectionFault0(pVCpu);
5462 return VINF_SUCCESS;
5463 }
5464
5465 case IEMMODE_64BIT:
5466 {
5467 RTGCPTR GCPtrMem = *pGCPtrMem;
5468 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5469 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5470
5471 Assert(cbMem >= 1);
5472 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5473 return VINF_SUCCESS;
5474 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5475 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5476 return iemRaiseGeneralProtectionFault0(pVCpu);
5477 }
5478
5479 default:
5480 AssertFailedReturn(VERR_IEM_IPE_7);
5481 }
5482}
5483
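/*
 * Illustrative sketch (not compiled, name made up): for a normal (not
 * expand-down) data segment in 16-bit/32-bit mode the function above boils
 * down to a limit check on the first and last byte followed by adding the
 * segment base.
 */
#if 0
static bool iemExampleDataSegLimitCheck(PCPUMSELREGHID pSel, RTGCPTR32 GCPtrFirst32, uint32_t cbMem, RTGCPTR32 *pGCPtrEff)
{
    RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbMem - 1;
    if (   GCPtrFirst32 > pSel->u32Limit
        || GCPtrLast32  > pSel->u32Limit)
        return false; /* the real code raises #GP/#SS via iemRaiseSelectorBounds. */
    *pGCPtrEff = GCPtrFirst32 + (uint32_t)pSel->u64Base;
    return true;
}
#endif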
5484
5485/**
5486 * Translates a virtual address to a physical address and checks if we
5487 * can access the page as specified.
5488 *
5489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5490 * @param GCPtrMem The virtual address.
5491 * @param cbAccess The access size, for raising \#PF correctly for
5492 * FXSAVE and such.
5493 * @param fAccess The intended access.
5494 * @param pGCPhysMem Where to return the physical address.
5495 */
5496VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5497 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5498{
5499 /** @todo Need a different PGM interface here. We're currently using
5500     *        generic / REM interfaces. This won't cut it for R0. */
5501 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5502 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5503 * here. */
5504 PGMPTWALK Walk;
5505 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5506 if (RT_FAILURE(rc))
5507 {
5508 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5509 /** @todo Check unassigned memory in unpaged mode. */
5510 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5511#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5512 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5513 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5514#endif
5515 *pGCPhysMem = NIL_RTGCPHYS;
5516 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5517 }
5518
5519 /* If the page is writable and does not have the no-exec bit set, all
5520 access is allowed. Otherwise we'll have to check more carefully... */
5521 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5522 {
5523 /* Write to read only memory? */
5524 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5525 && !(Walk.fEffective & X86_PTE_RW)
5526 && ( ( pVCpu->iem.s.uCpl == 3
5527 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5528 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5529 {
5530 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5531 *pGCPhysMem = NIL_RTGCPHYS;
5532#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5533 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5534 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5535#endif
5536 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5537 }
5538
5539 /* Kernel memory accessed by userland? */
5540 if ( !(Walk.fEffective & X86_PTE_US)
5541 && pVCpu->iem.s.uCpl == 3
5542 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5543 {
5544 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5545 *pGCPhysMem = NIL_RTGCPHYS;
5546#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5547 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5548 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5549#endif
5550 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5551 }
5552
5553 /* Executing non-executable memory? */
5554 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5555 && (Walk.fEffective & X86_PTE_PAE_NX)
5556 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5557 {
5558 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5559 *pGCPhysMem = NIL_RTGCPHYS;
5560#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5561 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5562 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5563#endif
5564 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5565 VERR_ACCESS_DENIED);
5566 }
5567 }
5568
5569 /*
5570 * Set the dirty / access flags.
5571     * ASSUMES this is set when the address is translated rather than on commit...
5572 */
5573 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5574 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5575 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5576 {
5577 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5578 AssertRC(rc2);
5579 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5580 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5581 }
5582
5583 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5584 *pGCPhysMem = GCPhys;
5585 return VINF_SUCCESS;
5586}
5587
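/*
 * Illustrative sketch (not compiled, name made up): the physical address
 * returned above is the page frame from the walk combined with the offset of
 * the virtual address within that page.
 */
#if 0
static RTGCPHYS iemExampleComposePhysAddr(RTGCPHYS GCPhysPage, RTGCPTR GCPtrMem)
{
    /* E.g. page 0x12345000 + virtual offset 0xabc -> 0x12345abc. */
    return (GCPhysPage & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK) | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
}
#endif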
5588
5589/**
5590 * Looks up a memory mapping entry.
5591 *
5592 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5594 * @param pvMem The memory address.
5595 * @param   fAccess             The access to look up (what + type).
5596 */
5597DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5598{
5599 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5600 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5601 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5602 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5603 return 0;
5604 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5605 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5606 return 1;
5607 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5608 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5609 return 2;
5610 return VERR_NOT_FOUND;
5611}
5612
5613
5614/**
5615 * Finds a free memmap entry when using iNextMapping doesn't work.
5616 *
5617 * @returns Memory mapping index, 1024 on failure.
5618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5619 */
5620static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5621{
5622 /*
5623 * The easy case.
5624 */
5625 if (pVCpu->iem.s.cActiveMappings == 0)
5626 {
5627 pVCpu->iem.s.iNextMapping = 1;
5628 return 0;
5629 }
5630
5631 /* There should be enough mappings for all instructions. */
5632 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5633
5634 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5635 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5636 return i;
5637
5638 AssertFailedReturn(1024);
5639}
5640
5641
5642/**
5643 * Commits a bounce buffer that needs writing back and unmaps it.
5644 *
5645 * @returns Strict VBox status code.
5646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5647 * @param iMemMap The index of the buffer to commit.
5648 * @param   fPostponeFail       Whether we can postpone write failures to ring-3.
5649 *                              Always false in ring-3, obviously.
5650 */
5651static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5652{
5653 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5654 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5655#ifdef IN_RING3
5656 Assert(!fPostponeFail);
5657 RT_NOREF_PV(fPostponeFail);
5658#endif
5659
5660 /*
5661 * Do the writing.
5662 */
5663 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5664 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5665 {
5666 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5667 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5668 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5669 if (!pVCpu->iem.s.fBypassHandlers)
5670 {
5671 /*
5672 * Carefully and efficiently dealing with access handler return
5673             * codes makes this a little bloated.
5674 */
5675 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5676 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5677 pbBuf,
5678 cbFirst,
5679 PGMACCESSORIGIN_IEM);
5680 if (rcStrict == VINF_SUCCESS)
5681 {
5682 if (cbSecond)
5683 {
5684 rcStrict = PGMPhysWrite(pVM,
5685 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5686 pbBuf + cbFirst,
5687 cbSecond,
5688 PGMACCESSORIGIN_IEM);
5689 if (rcStrict == VINF_SUCCESS)
5690 { /* nothing */ }
5691 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5692 {
5693 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5694 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5695 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5696 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5697 }
5698#ifndef IN_RING3
5699 else if (fPostponeFail)
5700 {
5701 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5702 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5703 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5704 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5705 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5706 return iemSetPassUpStatus(pVCpu, rcStrict);
5707 }
5708#endif
5709 else
5710 {
5711 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5712 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5713 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5714 return rcStrict;
5715 }
5716 }
5717 }
5718 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5719 {
5720 if (!cbSecond)
5721 {
5722 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5723 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5724 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5725 }
5726 else
5727 {
5728 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5729 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5730 pbBuf + cbFirst,
5731 cbSecond,
5732 PGMACCESSORIGIN_IEM);
5733 if (rcStrict2 == VINF_SUCCESS)
5734 {
5735 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5736 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5737 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5738 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5739 }
5740 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5741 {
5742 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5743 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5744 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5745 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5746 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5747 }
5748#ifndef IN_RING3
5749 else if (fPostponeFail)
5750 {
5751 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5752 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5753 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5754 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5755 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5756 return iemSetPassUpStatus(pVCpu, rcStrict);
5757 }
5758#endif
5759 else
5760 {
5761 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5762 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5763 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5764 return rcStrict2;
5765 }
5766 }
5767 }
5768#ifndef IN_RING3
5769 else if (fPostponeFail)
5770 {
5771 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5772 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5773 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5774 if (!cbSecond)
5775 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5776 else
5777 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5778 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5779 return iemSetPassUpStatus(pVCpu, rcStrict);
5780 }
5781#endif
5782 else
5783 {
5784 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5785 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5786 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5787 return rcStrict;
5788 }
5789 }
5790 else
5791 {
5792 /*
5793 * No access handlers, much simpler.
5794 */
5795 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5796 if (RT_SUCCESS(rc))
5797 {
5798 if (cbSecond)
5799 {
5800 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5801 if (RT_SUCCESS(rc))
5802 { /* likely */ }
5803 else
5804 {
5805 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5806 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5807 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5808 return rc;
5809 }
5810 }
5811 }
5812 else
5813 {
5814 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5815 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5816 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5817 return rc;
5818 }
5819 }
5820 }
5821
5822#if defined(IEM_LOG_MEMORY_WRITES)
5823 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5824 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5825 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5826 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5827 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5828 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5829
5830 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5831 g_cbIemWrote = cbWrote;
5832 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5833#endif
5834
5835 /*
5836 * Free the mapping entry.
5837 */
5838 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5839 Assert(pVCpu->iem.s.cActiveMappings != 0);
5840 pVCpu->iem.s.cActiveMappings--;
5841 return VINF_SUCCESS;
5842}
5843
5844
5845/**
5846 * iemMemMap worker that deals with a request crossing pages.
5847 */
5848static VBOXSTRICTRC
5849iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5850{
5851 Assert(cbMem <= GUEST_PAGE_SIZE);
5852
5853 /*
5854 * Do the address translations.
5855 */
5856 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5857 RTGCPHYS GCPhysFirst;
5858 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5859 if (rcStrict != VINF_SUCCESS)
5860 return rcStrict;
5861 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5862
5863 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5864 RTGCPHYS GCPhysSecond;
5865 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5866 cbSecondPage, fAccess, &GCPhysSecond);
5867 if (rcStrict != VINF_SUCCESS)
5868 return rcStrict;
5869 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5870 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5871
5872 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5873
5874 /*
5875 * Read in the current memory content if it's a read, execute or partial
5876 * write access.
5877 */
5878 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5879
5880 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5881 {
5882 if (!pVCpu->iem.s.fBypassHandlers)
5883 {
5884 /*
5885 * Must carefully deal with access handler status codes here,
5886             * which makes the code a bit bloated.
5887 */
5888 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5889 if (rcStrict == VINF_SUCCESS)
5890 {
5891 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5892 if (rcStrict == VINF_SUCCESS)
5893 { /*likely */ }
5894 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5895 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5896 else
5897 {
5898 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5899 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5900 return rcStrict;
5901 }
5902 }
5903 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5904 {
5905 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5906 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5907 {
5908 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5909 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5910 }
5911 else
5912 {
5913 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5914                     GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5915 return rcStrict2;
5916 }
5917 }
5918 else
5919 {
5920 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5921 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5922 return rcStrict;
5923 }
5924 }
5925 else
5926 {
5927 /*
5928             * No informational status codes here, much more straightforward.
5929 */
5930 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5931 if (RT_SUCCESS(rc))
5932 {
5933 Assert(rc == VINF_SUCCESS);
5934 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5935 if (RT_SUCCESS(rc))
5936 Assert(rc == VINF_SUCCESS);
5937 else
5938 {
5939 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5940 return rc;
5941 }
5942 }
5943 else
5944 {
5945 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5946 return rc;
5947 }
5948 }
5949 }
5950#ifdef VBOX_STRICT
5951 else
5952 memset(pbBuf, 0xcc, cbMem);
5953 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5954 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5955#endif
5956 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
5957
5958 /*
5959 * Commit the bounce buffer entry.
5960 */
5961 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5962 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5963 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5964 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5965 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5966 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5967 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5968 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5969 pVCpu->iem.s.cActiveMappings++;
5970
5971 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5972 *ppvMem = pbBuf;
5973 return VINF_SUCCESS;
5974}
5975
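/*
 * Illustrative sketch (not compiled, name made up): how the function above
 * splits a page-crossing access into the two per-page chunks; it assumes the
 * access really does cross a page boundary, which is the only case it is used
 * for.
 */
#if 0
static void iemExampleSplitCrossPageAccess(RTGCPTR GCPtrFirst, size_t cbMem, uint32_t *pcbFirstPage, uint32_t *pcbSecondPage)
{
    /* E.g. an 8 byte access at page offset 0xffc -> 4 bytes in the first page, 4 in the second. */
    *pcbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    *pcbSecondPage = (uint32_t)cbMem - *pcbFirstPage;
}
#endif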
5976
5977/**
5978 * iemMemMap worker that deals with iemMemPageMap failures.
5979 */
5980static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5981 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5982{
5983 /*
5984 * Filter out conditions we can handle and the ones which shouldn't happen.
5985 */
5986 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5987 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5988 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5989 {
5990 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5991 return rcMap;
5992 }
5993 pVCpu->iem.s.cPotentialExits++;
5994
5995 /*
5996 * Read in the current memory content if it's a read, execute or partial
5997 * write access.
5998 */
5999 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6000 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6001 {
6002 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6003 memset(pbBuf, 0xff, cbMem);
6004 else
6005 {
6006 int rc;
6007 if (!pVCpu->iem.s.fBypassHandlers)
6008 {
6009 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6010 if (rcStrict == VINF_SUCCESS)
6011 { /* nothing */ }
6012 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6013 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6014 else
6015 {
6016 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6017 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6018 return rcStrict;
6019 }
6020 }
6021 else
6022 {
6023 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6024 if (RT_SUCCESS(rc))
6025 { /* likely */ }
6026 else
6027 {
6028 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6029 GCPhysFirst, rc));
6030 return rc;
6031 }
6032 }
6033 }
6034 }
6035#ifdef VBOX_STRICT
6036 else
6037 memset(pbBuf, 0xcc, cbMem);
6040 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6041 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6042#endif
6043
6044 /*
6045 * Commit the bounce buffer entry.
6046 */
6047 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6048 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6049 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6050 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6051 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6052 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6053 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6054 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6055 pVCpu->iem.s.cActiveMappings++;
6056
6057 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6058 *ppvMem = pbBuf;
6059 return VINF_SUCCESS;
6060}
6061
6062
6063
6064/**
6065 * Maps the specified guest memory for the given kind of access.
6066 *
6067 * This may be using bounce buffering of the memory if it's crossing a page
6068 * boundary or if there is an access handler installed for any of it. Because
6069 * of lock prefix guarantees, we're in for some extra clutter when this
6070 * happens.
6071 *
6072 * This may raise a \#GP, \#SS, \#PF or \#AC.
6073 *
6074 * @returns VBox strict status code.
6075 *
6076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6077 * @param ppvMem Where to return the pointer to the mapped memory.
6078 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6079 * 8, 12, 16, 32 or 512. When used by string operations
6080 * it can be up to a page.
6081 * @param iSegReg The index of the segment register to use for this
6082 * access. The base and limits are checked. Use UINT8_MAX
6083 * to indicate that no segmentation is required (for IDT,
6084 * GDT and LDT accesses).
6085 * @param GCPtrMem The address of the guest memory.
6086 * @param fAccess How the memory is being accessed. The
6087 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6088 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6089 * when raising exceptions.
6090 * @param uAlignCtl Alignment control:
6091 * - Bits 15:0 is the alignment mask.
6092 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6093 * IEM_MEMMAP_F_ALIGN_SSE, and
6094 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6095 * Pass zero to skip alignment.
6096 */
6097VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6098 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6099{
6100 /*
6101 * Check the input and figure out which mapping entry to use.
6102 */
6103 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6104 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6105 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6106 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6107 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6108
6109 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6110 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6111 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6112 {
6113 iMemMap = iemMemMapFindFree(pVCpu);
6114 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6115 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6116 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6117 pVCpu->iem.s.aMemMappings[2].fAccess),
6118 VERR_IEM_IPE_9);
6119 }
6120
6121 /*
6122 * Map the memory, checking that we can actually access it. If something
6123 * slightly complicated happens, fall back on bounce buffering.
6124 */
6125 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6126 if (rcStrict == VINF_SUCCESS)
6127 { /* likely */ }
6128 else
6129 return rcStrict;
6130
6131 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6132 { /* likely */ }
6133 else
6134 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6135
6136 /*
6137 * Alignment check.
6138 */
6139 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6140 { /* likelyish */ }
6141 else
6142 {
6143 /* Misaligned access. */
6144 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6145 {
6146 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6147 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6148 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6149 {
6150 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6151
6152 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6153 return iemRaiseAlignmentCheckException(pVCpu);
6154 }
6155 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6156 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6157 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6158 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6159 * that's what FXSAVE does on a 10980xe. */
6160 && iemMemAreAlignmentChecksEnabled(pVCpu))
6161 return iemRaiseAlignmentCheckException(pVCpu);
6162 else
6163 return iemRaiseGeneralProtectionFault0(pVCpu);
6164 }
6165 }
6166
6167#ifdef IEM_WITH_DATA_TLB
6168 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6169
6170 /*
6171 * Get the TLB entry for this page.
6172 */
6173 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6174 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6175 if (pTlbe->uTag == uTag)
6176 {
6177# ifdef VBOX_WITH_STATISTICS
6178 pVCpu->iem.s.DataTlb.cTlbHits++;
6179# endif
6180 }
6181 else
6182 {
6183 pVCpu->iem.s.DataTlb.cTlbMisses++;
6184 PGMPTWALK Walk;
6185 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6186 if (RT_FAILURE(rc))
6187 {
6188 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6189# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6190 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6191 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6192# endif
6193 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6194 }
6195
6196 Assert(Walk.fSucceeded);
6197 pTlbe->uTag = uTag;
6198 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6199 pTlbe->GCPhys = Walk.GCPhys;
6200 pTlbe->pbMappingR3 = NULL;
6201 }
6202
6203 /*
6204 * Check TLB page table level access flags.
6205 */
6206 /* If the page is either supervisor only or non-writable, we need to do
6207 more careful access checks. */
6208 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6209 {
6210 /* Write to read only memory? */
6211 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6212 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6213 && ( ( pVCpu->iem.s.uCpl == 3
6214 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6215 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6216 {
6217 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6218# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6219 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6220 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6221# endif
6222 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6223 }
6224
6225 /* Kernel memory accessed by userland? */
6226 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6227 && pVCpu->iem.s.uCpl == 3
6228 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6229 {
6230 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6231# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6232 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6233 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6234# endif
6235 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6236 }
6237 }
6238
6239 /*
6240 * Set the dirty / access flags.
6241 * ASSUMES this is set when the address is translated rather than on commit...
6242 */
6243 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6244 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6245 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6246 {
6247 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6248 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6249 AssertRC(rc2);
6250 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6251 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6252 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6253 }
6254
6255 /*
6256 * Look up the physical page info if necessary.
6257 */
6258 uint8_t *pbMem = NULL;
6259 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6260# ifdef IN_RING3
6261 pbMem = pTlbe->pbMappingR3;
6262# else
6263 pbMem = NULL;
6264# endif
6265 else
6266 {
6267 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6268 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6269 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6270 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6271 if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6272 { /* likely */ }
6273 else
6274 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6275 pTlbe->pbMappingR3 = NULL;
6276 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6277 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6278 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6279 &pbMem, &pTlbe->fFlagsAndPhysRev);
6280 AssertRCReturn(rc, rc);
6281# ifdef IN_RING3
6282 pTlbe->pbMappingR3 = pbMem;
6283# endif
6284 }
6285
6286 /*
6287 * Check the physical page level access and mapping.
6288 */
6289 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6290 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6291 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6292 { /* probably likely */ }
6293 else
6294 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6295 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6296 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6297 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6298 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6299 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6300
6301 if (pbMem)
6302 {
6303 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6304 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6305 fAccess |= IEM_ACCESS_NOT_LOCKED;
6306 }
6307 else
6308 {
6309 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6310 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6311 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6312 if (rcStrict != VINF_SUCCESS)
6313 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6314 }
6315
6316 void * const pvMem = pbMem;
6317
6318 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6319 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6320 if (fAccess & IEM_ACCESS_TYPE_READ)
6321 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6322
6323#else /* !IEM_WITH_DATA_TLB */
6324
6325 RTGCPHYS GCPhysFirst;
6326 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6327 if (rcStrict != VINF_SUCCESS)
6328 return rcStrict;
6329
6330 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6331 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6332 if (fAccess & IEM_ACCESS_TYPE_READ)
6333 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6334
6335 void *pvMem;
6336 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6337 if (rcStrict != VINF_SUCCESS)
6338 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6339
6340#endif /* !IEM_WITH_DATA_TLB */
6341
6342 /*
6343 * Fill in the mapping table entry.
6344 */
6345 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6346 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6347 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6348 pVCpu->iem.s.cActiveMappings += 1;
6349
6350 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6351 *ppvMem = pvMem;
6352
6353 return VINF_SUCCESS;
6354}
6355
6356
6357/**
6358 * Commits the guest memory if bounce buffered and unmaps it.
6359 *
6360 * @returns Strict VBox status code.
6361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6362 * @param pvMem The mapping.
6363 * @param fAccess The kind of access.
6364 */
6365VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6366{
6367 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6368 AssertReturn(iMemMap >= 0, iMemMap);
6369
6370 /* If it's bounce buffered, we may need to write back the buffer. */
6371 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6372 {
6373 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6374 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6375 }
6376 /* Otherwise unlock it. */
6377 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6378 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6379
6380 /* Free the entry. */
6381 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6382 Assert(pVCpu->iem.s.cActiveMappings != 0);
6383 pVCpu->iem.s.cActiveMappings--;
6384 return VINF_SUCCESS;
6385}
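
/*
 * Example (illustrative sketch only): how a caller typically pairs iemMemMap
 * with iemMemCommitAndUnmap for a read-modify-write data access.  The helper
 * name iemExampleXchgU16 is hypothetical; the fAccess value and the natural
 * alignment mask (sizeof(*pu16Dst) - 1) follow the parameter conventions
 * documented for iemMemMap above.  A 16-byte SSE-style access would instead
 * pass 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE as uAlignCtl.
 */
#if 0 /* example sketch, not compiled */
static VBOXSTRICTRC iemExampleXchgU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                      uint16_t *pu16Old, uint16_t u16New)
{
    /* Map two guest bytes for read+write; bounce buffering is used automatically
       if the access crosses a page boundary or hits an access handler. */
    uint16_t *pu16Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
                                      IEM_ACCESS_DATA_RW, sizeof(*pu16Dst) - 1);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16Old = *pu16Dst;    /* read the old value */
        *pu16Dst = u16New;      /* store the new one */
        /* Commit writes back the bounce buffer (if one was used) and releases the mapping. */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif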
6386
6387#ifdef IEM_WITH_SETJMP
6388
6389/**
6390 * Maps the specified guest memory for the given kind of access, longjmp on
6391 * error.
6392 *
6393 * This may be using bounce buffering of the memory if it's crossing a page
6394 * boundary or if there is an access handler installed for any of it. Because
6395 * of lock prefix guarantees, we're in for some extra clutter when this
6396 * happens.
6397 *
6398 * This may raise a \#GP, \#SS, \#PF or \#AC.
6399 *
6400 * @returns Pointer to the mapped memory.
6401 *
6402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6403 * @param cbMem The number of bytes to map. This is usually 1,
6404 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6405 * string operations it can be up to a page.
6406 * @param iSegReg The index of the segment register to use for
6407 * this access. The base and limits are checked.
6408 * Use UINT8_MAX to indicate that no segmentation
6409 * is required (for IDT, GDT and LDT accesses).
6410 * @param GCPtrMem The address of the guest memory.
6411 * @param fAccess How the memory is being accessed. The
6412 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6413 * how to map the memory, while the
6414 * IEM_ACCESS_WHAT_XXX bit is used when raising
6415 * exceptions.
6416 * @param uAlignCtl Alignment control:
6417 * - Bits 15:0 is the alignment mask.
6418 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6419 * IEM_MEMMAP_F_ALIGN_SSE, and
6420 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6421 * Pass zero to skip alignment.
6422 */
6423void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6424 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6425{
6426 /*
6427 * Check the input, check segment access and adjust address
6428 * with segment base.
6429 */
6430 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6431 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6432 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6433
6434 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6435 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6436 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6437
6438 /*
6439 * Alignment check.
6440 */
6441 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6442 { /* likelyish */ }
6443 else
6444 {
6445 /* Misaligned access. */
6446 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6447 {
6448 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6449 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6450 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6451 {
6452 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6453
6454 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6455 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6456 }
6457 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6458 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6459 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6460 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6461 * that's what FXSAVE does on a 10980xe. */
6462 && iemMemAreAlignmentChecksEnabled(pVCpu))
6463 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6464 else
6465 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6466 }
6467 }
6468
6469 /*
6470 * Figure out which mapping entry to use.
6471 */
6472 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6473 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6474 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6475 {
6476 iMemMap = iemMemMapFindFree(pVCpu);
6477 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6478 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6479 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6480 pVCpu->iem.s.aMemMappings[2].fAccess),
6481 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6482 }
6483
6484 /*
6485 * Crossing a page boundary?
6486 */
6487 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6488 { /* No (likely). */ }
6489 else
6490 {
6491 void *pvMem;
6492 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6493 if (rcStrict == VINF_SUCCESS)
6494 return pvMem;
6495 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6496 }
6497
6498#ifdef IEM_WITH_DATA_TLB
6499 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6500
6501 /*
6502 * Get the TLB entry for this page.
6503 */
6504 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6505 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6506 if (pTlbe->uTag == uTag)
6507 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6508 else
6509 {
6510 pVCpu->iem.s.DataTlb.cTlbMisses++;
6511 PGMPTWALK Walk;
6512 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6513 if (RT_FAILURE(rc))
6514 {
6515 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6516# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6517 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6518 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6519# endif
6520 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6521 }
6522
6523 Assert(Walk.fSucceeded);
6524 pTlbe->uTag = uTag;
6525 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6526 pTlbe->GCPhys = Walk.GCPhys;
6527 pTlbe->pbMappingR3 = NULL;
6528 }
6529
6530 /*
6531 * Check the flags and physical revision.
6532 */
6533 /** @todo make the caller pass these in with fAccess. */
6534 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6535 ? IEMTLBE_F_PT_NO_USER : 0;
6536 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6537 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6538 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6539 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6540 ? IEMTLBE_F_PT_NO_WRITE : 0)
6541 : 0;
6542 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6543 uint8_t *pbMem = NULL;
6544 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6545 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6546# ifdef IN_RING3
6547 pbMem = pTlbe->pbMappingR3;
6548# else
6549 pbMem = NULL;
6550# endif
6551 else
6552 {
6553 /*
6554 * Okay, something isn't quite right or needs refreshing.
6555 */
6556 /* Write to read only memory? */
6557 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6558 {
6559 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6560# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6561 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6562 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6563# endif
6564 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6565 }
6566
6567 /* Kernel memory accessed by userland? */
6568 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6569 {
6570 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6571# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6572 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6573 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6574# endif
6575 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6576 }
6577
6578 /* Set the dirty / access flags.
6579 ASSUMES this is set when the address is translated rather than on commit... */
6580 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6581 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6582 {
6583 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6584 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6585 AssertRC(rc2);
6586 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6587 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6588 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6589 }
6590
6591 /*
6592 * Check if the physical page info needs updating.
6593 */
6594 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6595# ifdef IN_RING3
6596 pbMem = pTlbe->pbMappingR3;
6597# else
6598 pbMem = NULL;
6599# endif
6600 else
6601 {
6602 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6603 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6604 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6605 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6606 pTlbe->pbMappingR3 = NULL;
6607 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6608 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6609 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6610 &pbMem, &pTlbe->fFlagsAndPhysRev);
6611 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6612# ifdef IN_RING3
6613 pTlbe->pbMappingR3 = pbMem;
6614# endif
6615 }
6616
6617 /*
6618 * Check the physical page level access and mapping.
6619 */
6620 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6621 { /* probably likely */ }
6622 else
6623 {
6624 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6625 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6626 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6627 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6628 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6629 if (rcStrict == VINF_SUCCESS)
6630 return pbMem;
6631 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6632 }
6633 }
6634 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6635
6636 if (pbMem)
6637 {
6638 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6639 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6640 fAccess |= IEM_ACCESS_NOT_LOCKED;
6641 }
6642 else
6643 {
6644 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6645 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6646 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6647 if (rcStrict == VINF_SUCCESS)
6648 return pbMem;
6649 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6650 }
6651
6652 void * const pvMem = pbMem;
6653
6654 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6655 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6656 if (fAccess & IEM_ACCESS_TYPE_READ)
6657 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6658
6659#else /* !IEM_WITH_DATA_TLB */
6660
6661
6662 RTGCPHYS GCPhysFirst;
6663 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6664 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6665 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6666
6667 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6668 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6669 if (fAccess & IEM_ACCESS_TYPE_READ)
6670 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6671
6672 void *pvMem;
6673 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6674 if (rcStrict == VINF_SUCCESS)
6675 { /* likely */ }
6676 else
6677 {
6678 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6679 if (rcStrict == VINF_SUCCESS)
6680 return pvMem;
6681 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6682 }
6683
6684#endif /* !IEM_WITH_DATA_TLB */
6685
6686 /*
6687 * Fill in the mapping table entry.
6688 */
6689 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6690 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6691 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6692 pVCpu->iem.s.cActiveMappings++;
6693
6694 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6695 return pvMem;
6696}
6697
6698
6699/**
6700 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6701 *
6702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6703 * @param pvMem The mapping.
6704 * @param fAccess The kind of access.
6705 */
6706void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6707{
6708 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6709 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6710
6711 /* If it's bounce buffered, we may need to write back the buffer. */
6712 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6713 {
6714 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6715 {
6716 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6717 if (rcStrict == VINF_SUCCESS)
6718 return;
6719 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6720 }
6721 }
6722 /* Otherwise unlock it. */
6723 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6724 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6725
6726 /* Free the entry. */
6727 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6728 Assert(pVCpu->iem.s.cActiveMappings != 0);
6729 pVCpu->iem.s.cActiveMappings--;
6730}
6731
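/*
 * Example (illustrative sketch only): the setjmp/longjmp flavour of the same
 * read-modify-write pattern.  Failures do not come back as status codes here;
 * iemMemMapJmp and iemMemCommitAndUnmapJmp longjmp back to IEM's setjmp point
 * on error, so the caller stays linear.  The helper name iemExampleXchgU16Jmp
 * is hypothetical.
 */
# if 0 /* example sketch, not compiled */
static uint16_t iemExampleXchgU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                     uint16_t u16New) IEM_NOEXCEPT_MAY_LONGJMP
{
    uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
                                                 IEM_ACCESS_DATA_RW, sizeof(*pu16Dst) - 1);
    uint16_t const u16Old = *pu16Dst;   /* read the old value */
    *pu16Dst = u16New;                  /* store the new one */
    iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_RW);
    return u16Old;                      /* only reached when everything succeeded */
}
# endif
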
6732#endif /* IEM_WITH_SETJMP */
6733
6734#ifndef IN_RING3
6735/**
6736 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6737 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
6738 *
6739 * Allows the instruction to be completed and retired, while the IEM user will
6740 * return to ring-3 immediately afterwards and do the postponed writes there.
6741 *
6742 * @returns VBox status code (no strict statuses). Caller must check
6743 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6745 * @param pvMem The mapping.
6746 * @param fAccess The kind of access.
6747 */
6748VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6749{
6750 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6751 AssertReturn(iMemMap >= 0, iMemMap);
6752
6753 /* If it's bounce buffered, we may need to write back the buffer. */
6754 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6755 {
6756 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6757 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6758 }
6759 /* Otherwise unlock it. */
6760 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6761 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6762
6763 /* Free the entry. */
6764 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6765 Assert(pVCpu->iem.s.cActiveMappings != 0);
6766 pVCpu->iem.s.cActiveMappings--;
6767 return VINF_SUCCESS;
6768}
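
/*
 * Example (illustrative sketch only): how a ring-0 caller would honour the
 * VMCPU_FF_IEM contract described above before repeating another iteration of
 * a string instruction.  The loop shape and the pfnDoOneIteration callback are
 * hypothetical; a real loop would also stop once the string operation itself
 * is done.
 */
# if 0 /* example sketch, not compiled */
static VBOXSTRICTRC iemExampleRepeatStringOp(PVMCPUCC pVCpu, VBOXSTRICTRC (*pfnDoOneIteration)(PVMCPUCC pVCpu))
{
    for (;;)
    {
        /* One iteration ending in iemMemCommitAndUnmapPostponeTroubleToR3. */
        VBOXSTRICTRC rcStrict = pfnDoOneIteration(pVCpu);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        /* A pending VMCPU_FF_IEM means a write was postponed: stop iterating
           here and let the caller return to ring-3 to flush it. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
            return VINF_SUCCESS;
    }
}
# endif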
6769#endif
6770
6771
6772/**
6773 * Rolls back mappings, releasing page locks and such.
6774 *
6775 * The caller shall only call this after checking cActiveMappings.
6776 *
6777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6778 */
6779void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6780{
6781 Assert(pVCpu->iem.s.cActiveMappings > 0);
6782
6783 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6784 while (iMemMap-- > 0)
6785 {
6786 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6787 if (fAccess != IEM_ACCESS_INVALID)
6788 {
6789 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6790 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6791 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6792 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6793 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6794 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6795 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6796 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6797 pVCpu->iem.s.cActiveMappings--;
6798 }
6799 }
6800}
6801
6802
6803/**
6804 * Fetches a data byte.
6805 *
6806 * @returns Strict VBox status code.
6807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6808 * @param pu8Dst Where to return the byte.
6809 * @param iSegReg The index of the segment register to use for
6810 * this access. The base and limits are checked.
6811 * @param GCPtrMem The address of the guest memory.
6812 */
6813VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6814{
6815 /* The lazy approach for now... */
6816 uint8_t const *pu8Src;
6817 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6818 if (rc == VINF_SUCCESS)
6819 {
6820 *pu8Dst = *pu8Src;
6821 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6822 }
6823 return rc;
6824}
6825
6826
6827#ifdef IEM_WITH_SETJMP
6828/**
6829 * Fetches a data byte, longjmp on error.
6830 *
6831 * @returns The byte.
6832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6833 * @param iSegReg The index of the segment register to use for
6834 * this access. The base and limits are checked.
6835 * @param GCPtrMem The address of the guest memory.
6836 */
6837uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6838{
6839 /* The lazy approach for now... */
6840 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6841 uint8_t const bRet = *pu8Src;
6842 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6843 return bRet;
6844}
6845#endif /* IEM_WITH_SETJMP */
6846
6847
6848/**
6849 * Fetches a data word.
6850 *
6851 * @returns Strict VBox status code.
6852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6853 * @param pu16Dst Where to return the word.
6854 * @param iSegReg The index of the segment register to use for
6855 * this access. The base and limits are checked.
6856 * @param GCPtrMem The address of the guest memory.
6857 */
6858VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6859{
6860 /* The lazy approach for now... */
6861 uint16_t const *pu16Src;
6862 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6863 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6864 if (rc == VINF_SUCCESS)
6865 {
6866 *pu16Dst = *pu16Src;
6867 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6868 }
6869 return rc;
6870}
6871
6872
6873#ifdef IEM_WITH_SETJMP
6874/**
6875 * Fetches a data word, longjmp on error.
6876 *
6877 * @returns The word
6878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6879 * @param iSegReg The index of the segment register to use for
6880 * this access. The base and limits are checked.
6881 * @param GCPtrMem The address of the guest memory.
6882 */
6883uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6884{
6885 /* The lazy approach for now... */
6886 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6887 sizeof(*pu16Src) - 1);
6888 uint16_t const u16Ret = *pu16Src;
6889 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6890 return u16Ret;
6891}
6892#endif
6893
6894
6895/**
6896 * Fetches a data dword.
6897 *
6898 * @returns Strict VBox status code.
6899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6900 * @param pu32Dst Where to return the dword.
6901 * @param iSegReg The index of the segment register to use for
6902 * this access. The base and limits are checked.
6903 * @param GCPtrMem The address of the guest memory.
6904 */
6905VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6906{
6907 /* The lazy approach for now... */
6908 uint32_t const *pu32Src;
6909 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6910 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6911 if (rc == VINF_SUCCESS)
6912 {
6913 *pu32Dst = *pu32Src;
6914 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6915 }
6916 return rc;
6917}
6918
6919
6920/**
6921 * Fetches a data dword and zero extends it to a qword.
6922 *
6923 * @returns Strict VBox status code.
6924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6925 * @param pu64Dst Where to return the qword.
6926 * @param iSegReg The index of the segment register to use for
6927 * this access. The base and limits are checked.
6928 * @param GCPtrMem The address of the guest memory.
6929 */
6930VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6931{
6932 /* The lazy approach for now... */
6933 uint32_t const *pu32Src;
6934 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6935 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6936 if (rc == VINF_SUCCESS)
6937 {
6938 *pu64Dst = *pu32Src;
6939 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6940 }
6941 return rc;
6942}
6943
6944
6945#ifdef IEM_WITH_SETJMP
6946
6947/**
6948 * Fetches a data dword, longjmp on error, fallback/safe version.
6949 *
6950 * @returns The dword
6951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6952 * @param iSegReg The index of the segment register to use for
6953 * this access. The base and limits are checked.
6954 * @param GCPtrMem The address of the guest memory.
6955 */
6956uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6957{
6958 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6959 sizeof(*pu32Src) - 1);
6960 uint32_t const u32Ret = *pu32Src;
6961 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6962 return u32Ret;
6963}
6964
6965
6966/**
6967 * Fetches a data dword, longjmp on error.
6968 *
6969 * @returns The dword
6970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6971 * @param iSegReg The index of the segment register to use for
6972 * this access. The base and limits are checked.
6973 * @param GCPtrMem The address of the guest memory.
6974 */
6975uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6976{
6977# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6978 /*
6979 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6980 */
6981 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6982 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6983 {
6984 /*
6985 * TLB lookup.
6986 */
6987 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6988 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6989 if (pTlbe->uTag == uTag)
6990 {
6991 /*
6992 * Check TLB page table level access flags.
6993 */
6994 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6995 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6996 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6997 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6998 {
6999 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7000
7001 /*
7002 * Alignment check:
7003 */
7004 /** @todo check priority \#AC vs \#PF */
7005 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7006 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7007 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7008 || pVCpu->iem.s.uCpl != 3)
7009 {
7010 /*
7011 * Fetch and return the dword
7012 */
7013 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7014 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7015 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7016 }
7017 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7018 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7019 }
7020 }
7021 }
7022
7023 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7024 outdated page pointer, or other troubles. */
7025 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7026 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7027
7028# else
7029 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7030 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7031 uint32_t const u32Ret = *pu32Src;
7032 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7033 return u32Ret;
7034# endif
7035}
7036#endif
7037
7038
7039#ifdef SOME_UNUSED_FUNCTION
7040/**
7041 * Fetches a data dword and sign extends it to a qword.
7042 *
7043 * @returns Strict VBox status code.
7044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7045 * @param pu64Dst Where to return the sign extended value.
7046 * @param iSegReg The index of the segment register to use for
7047 * this access. The base and limits are checked.
7048 * @param GCPtrMem The address of the guest memory.
7049 */
7050VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7051{
7052 /* The lazy approach for now... */
7053 int32_t const *pi32Src;
7054 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7055 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7056 if (rc == VINF_SUCCESS)
7057 {
7058 *pu64Dst = *pi32Src;
7059 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7060 }
7061#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7062 else
7063 *pu64Dst = 0;
7064#endif
7065 return rc;
7066}
7067#endif
7068
7069
7070/**
7071 * Fetches a data qword.
7072 *
7073 * @returns Strict VBox status code.
7074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7075 * @param pu64Dst Where to return the qword.
7076 * @param iSegReg The index of the segment register to use for
7077 * this access. The base and limits are checked.
7078 * @param GCPtrMem The address of the guest memory.
7079 */
7080VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7081{
7082 /* The lazy approach for now... */
7083 uint64_t const *pu64Src;
7084 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7085 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7086 if (rc == VINF_SUCCESS)
7087 {
7088 *pu64Dst = *pu64Src;
7089 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7090 }
7091 return rc;
7092}
7093
7094
7095#ifdef IEM_WITH_SETJMP
7096/**
7097 * Fetches a data qword, longjmp on error.
7098 *
7099 * @returns The qword.
7100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7101 * @param iSegReg The index of the segment register to use for
7102 * this access. The base and limits are checked.
7103 * @param GCPtrMem The address of the guest memory.
7104 */
7105uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7106{
7107 /* The lazy approach for now... */
7108 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7109 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7110 uint64_t const u64Ret = *pu64Src;
7111 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7112 return u64Ret;
7113}
7114#endif
7115
7116
7117/**
7118 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7119 *
7120 * @returns Strict VBox status code.
7121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7122 * @param pu64Dst Where to return the qword.
7123 * @param iSegReg The index of the segment register to use for
7124 * this access. The base and limits are checked.
7125 * @param GCPtrMem The address of the guest memory.
7126 */
7127VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7128{
7129 /* The lazy approach for now... */
7130 uint64_t const *pu64Src;
7131 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7132 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7133 if (rc == VINF_SUCCESS)
7134 {
7135 *pu64Dst = *pu64Src;
7136 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7137 }
7138 return rc;
7139}
7140
7141
7142#ifdef IEM_WITH_SETJMP
7143/**
7144 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7145 *
7146 * @returns The qword.
7147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7148 * @param iSegReg The index of the segment register to use for
7149 * this access. The base and limits are checked.
7150 * @param GCPtrMem The address of the guest memory.
7151 */
7152uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7153{
7154 /* The lazy approach for now... */
7155 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7156 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7157 uint64_t const u64Ret = *pu64Src;
7158 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7159 return u64Ret;
7160}
7161#endif
7162
7163
7164/**
7165 * Fetches a data tword.
7166 *
7167 * @returns Strict VBox status code.
7168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7169 * @param pr80Dst Where to return the tword.
7170 * @param iSegReg The index of the segment register to use for
7171 * this access. The base and limits are checked.
7172 * @param GCPtrMem The address of the guest memory.
7173 */
7174VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7175{
7176 /* The lazy approach for now... */
7177 PCRTFLOAT80U pr80Src;
7178 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7179 if (rc == VINF_SUCCESS)
7180 {
7181 *pr80Dst = *pr80Src;
7182 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7183 }
7184 return rc;
7185}
7186
7187
7188#ifdef IEM_WITH_SETJMP
7189/**
7190 * Fetches a data tword, longjmp on error.
7191 *
7192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7193 * @param pr80Dst Where to return the tword.
7194 * @param iSegReg The index of the segment register to use for
7195 * this access. The base and limits are checked.
7196 * @param GCPtrMem The address of the guest memory.
7197 */
7198void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7199{
7200 /* The lazy approach for now... */
7201 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7202 *pr80Dst = *pr80Src;
7203 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7204}
7205#endif
7206
7207
7208/**
7209 * Fetches a data decimal tword.
7210 *
7211 * @returns Strict VBox status code.
7212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7213 * @param pd80Dst Where to return the tword.
7214 * @param iSegReg The index of the segment register to use for
7215 * this access. The base and limits are checked.
7216 * @param GCPtrMem The address of the guest memory.
7217 */
7218VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7219{
7220 /* The lazy approach for now... */
7221 PCRTPBCD80U pd80Src;
7222 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7223 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7224 if (rc == VINF_SUCCESS)
7225 {
7226 *pd80Dst = *pd80Src;
7227 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7228 }
7229 return rc;
7230}
7231
7232
7233#ifdef IEM_WITH_SETJMP
7234/**
7235 * Fetches a data decimal tword, longjmp on error.
7236 *
7237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7238 * @param pd80Dst Where to return the tword.
7239 * @param iSegReg The index of the segment register to use for
7240 * this access. The base and limits are checked.
7241 * @param GCPtrMem The address of the guest memory.
7242 */
7243void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7244{
7245 /* The lazy approach for now... */
7246 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7247 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7248 *pd80Dst = *pd80Src;
7249 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7250}
7251#endif
7252
7253
7254/**
7255 * Fetches a data dqword (double qword), generally SSE related.
7256 *
7257 * @returns Strict VBox status code.
7258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7259 * @param pu128Dst Where to return the dqword.
7260 * @param iSegReg The index of the segment register to use for
7261 * this access. The base and limits are checked.
7262 * @param GCPtrMem The address of the guest memory.
7263 */
7264VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7265{
7266 /* The lazy approach for now... */
7267 PCRTUINT128U pu128Src;
7268 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7269 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7270 if (rc == VINF_SUCCESS)
7271 {
7272 pu128Dst->au64[0] = pu128Src->au64[0];
7273 pu128Dst->au64[1] = pu128Src->au64[1];
7274 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7275 }
7276 return rc;
7277}
7278
7279
7280#ifdef IEM_WITH_SETJMP
7281/**
7282 * Fetches a data dqword (double qword), generally SSE related.
7283 *
7284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7285 * @param pu128Dst Where to return the dqword.
7286 * @param iSegReg The index of the segment register to use for
7287 * this access. The base and limits are checked.
7288 * @param GCPtrMem The address of the guest memory.
7289 */
7290void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7291{
7292 /* The lazy approach for now... */
7293 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7294 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7295 pu128Dst->au64[0] = pu128Src->au64[0];
7296 pu128Dst->au64[1] = pu128Src->au64[1];
7297 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7298}
7299#endif
7300
7301
7302/**
7303 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7304 * related.
7305 *
7306 * Raises \#GP(0) if not aligned.
7307 *
7308 * @returns Strict VBox status code.
7309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7310 * @param pu128Dst Where to return the dqword.
7311 * @param iSegReg The index of the segment register to use for
7312 * this access. The base and limits are checked.
7313 * @param GCPtrMem The address of the guest memory.
7314 */
7315VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7316{
7317 /* The lazy approach for now... */
7318 PCRTUINT128U pu128Src;
7319 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7320 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7321 if (rc == VINF_SUCCESS)
7322 {
7323 pu128Dst->au64[0] = pu128Src->au64[0];
7324 pu128Dst->au64[1] = pu128Src->au64[1];
7325 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7326 }
7327 return rc;
7328}
7329
7330
7331#ifdef IEM_WITH_SETJMP
7332/**
7333 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7334 * related, longjmp on error.
7335 *
7336 * Raises \#GP(0) if not aligned.
7337 *
7338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7339 * @param pu128Dst Where to return the dqword.
7340 * @param iSegReg The index of the segment register to use for
7341 * this access. The base and limits are checked.
7342 * @param GCPtrMem The address of the guest memory.
7343 */
7344void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7345 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7346{
7347 /* The lazy approach for now... */
7348 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7349 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7350 pu128Dst->au64[0] = pu128Src->au64[0];
7351 pu128Dst->au64[1] = pu128Src->au64[1];
7352 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7353}
7354#endif
7355
7356
7357/**
7358 * Fetches a data oword (octo word), generally AVX related.
7359 *
7360 * @returns Strict VBox status code.
7361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7362 * @param pu256Dst Where to return the oword.
7363 * @param iSegReg The index of the segment register to use for
7364 * this access. The base and limits are checked.
7365 * @param GCPtrMem The address of the guest memory.
7366 */
7367VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7368{
7369 /* The lazy approach for now... */
7370 PCRTUINT256U pu256Src;
7371 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7372 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7373 if (rc == VINF_SUCCESS)
7374 {
7375 pu256Dst->au64[0] = pu256Src->au64[0];
7376 pu256Dst->au64[1] = pu256Src->au64[1];
7377 pu256Dst->au64[2] = pu256Src->au64[2];
7378 pu256Dst->au64[3] = pu256Src->au64[3];
7379 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7380 }
7381 return rc;
7382}
7383
7384
7385#ifdef IEM_WITH_SETJMP
7386/**
7387 * Fetches a data oword (octo word), generally AVX related.
7388 *
7389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7390 * @param pu256Dst Where to return the oword.
7391 * @param iSegReg The index of the segment register to use for
7392 * this access. The base and limits are checked.
7393 * @param GCPtrMem The address of the guest memory.
7394 */
7395void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7396{
7397 /* The lazy approach for now... */
7398 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7399 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7400 pu256Dst->au64[0] = pu256Src->au64[0];
7401 pu256Dst->au64[1] = pu256Src->au64[1];
7402 pu256Dst->au64[2] = pu256Src->au64[2];
7403 pu256Dst->au64[3] = pu256Src->au64[3];
7404 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7405}
7406#endif
7407
7408
7409/**
7410 * Fetches a data oword (octo word) at an aligned address, generally AVX
7411 * related.
7412 *
7413 * Raises \#GP(0) if not aligned.
7414 *
7415 * @returns Strict VBox status code.
7416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7417 * @param   pu256Dst    Where to return the oword.
7418 * @param iSegReg The index of the segment register to use for
7419 * this access. The base and limits are checked.
7420 * @param GCPtrMem The address of the guest memory.
7421 */
7422VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7423{
7424 /* The lazy approach for now... */
7425 PCRTUINT256U pu256Src;
7426 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7427 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7428 if (rc == VINF_SUCCESS)
7429 {
7430 pu256Dst->au64[0] = pu256Src->au64[0];
7431 pu256Dst->au64[1] = pu256Src->au64[1];
7432 pu256Dst->au64[2] = pu256Src->au64[2];
7433 pu256Dst->au64[3] = pu256Src->au64[3];
7434 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7435 }
7436 return rc;
7437}
7438
7439
7440#ifdef IEM_WITH_SETJMP
7441/**
7442 * Fetches a data oword (octo word) at an aligned address, generally AVX
7443 * related, longjmp on error.
7444 *
7445 * Raises \#GP(0) if not aligned.
7446 *
7447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7448 * @param   pu256Dst    Where to return the oword.
7449 * @param iSegReg The index of the segment register to use for
7450 * this access. The base and limits are checked.
7451 * @param GCPtrMem The address of the guest memory.
7452 */
7453void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7454 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7455{
7456 /* The lazy approach for now... */
7457 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7458 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7459 pu256Dst->au64[0] = pu256Src->au64[0];
7460 pu256Dst->au64[1] = pu256Src->au64[1];
7461 pu256Dst->au64[2] = pu256Src->au64[2];
7462 pu256Dst->au64[3] = pu256Src->au64[3];
7463 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7464}
7465#endif
7466
7467
7468
7469/**
7470 * Fetches a descriptor register (lgdt, lidt).
7471 *
7472 * @returns Strict VBox status code.
7473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7474 * @param pcbLimit Where to return the limit.
7475 * @param pGCPtrBase Where to return the base.
7476 * @param iSegReg The index of the segment register to use for
7477 * this access. The base and limits are checked.
7478 * @param GCPtrMem The address of the guest memory.
7479 * @param enmOpSize The effective operand size.
7480 */
7481VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7482 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7483{
7484 /*
7485 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7486 * little special:
7487 * - The two reads are done separately.
7488 *    - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
7489 * - We suspect the 386 to actually commit the limit before the base in
7490 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7491 *      don't try to emulate this eccentric behavior, because it's not well
7492 * enough understood and rather hard to trigger.
7493 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7494 */
7495 VBOXSTRICTRC rcStrict;
7496 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7497 {
7498 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7499 if (rcStrict == VINF_SUCCESS)
7500 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7501 }
7502 else
7503 {
7504        uint32_t uTmp = 0; /* (Silences a Visual C++ 'maybe used uninitialized' warning.) */
7505 if (enmOpSize == IEMMODE_32BIT)
7506 {
7507 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7508 {
7509 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7510 if (rcStrict == VINF_SUCCESS)
7511 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7512 }
7513 else
7514 {
7515 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7516 if (rcStrict == VINF_SUCCESS)
7517 {
7518 *pcbLimit = (uint16_t)uTmp;
7519 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7520 }
7521 }
7522 if (rcStrict == VINF_SUCCESS)
7523 *pGCPtrBase = uTmp;
7524 }
7525 else
7526 {
7527 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7528 if (rcStrict == VINF_SUCCESS)
7529 {
7530 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7531 if (rcStrict == VINF_SUCCESS)
7532 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7533 }
7534 }
7535 }
7536 return rcStrict;
7537}
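/* Illustrative summary of the layout read above (not from the original source):
     +0: 16-bit limit (always)
     +2: base address - a qword in 64-bit mode, a dword with a 32-bit operand
         size, and only the low 24 bits with a 16-bit operand size. */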
7538
7539
7540
7541/**
7542 * Stores a data byte.
7543 *
7544 * @returns Strict VBox status code.
7545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7546 * @param iSegReg The index of the segment register to use for
7547 * this access. The base and limits are checked.
7548 * @param GCPtrMem The address of the guest memory.
7549 * @param u8Value The value to store.
7550 */
7551VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7552{
7553 /* The lazy approach for now... */
7554 uint8_t *pu8Dst;
7555 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7556 if (rc == VINF_SUCCESS)
7557 {
7558 *pu8Dst = u8Value;
7559 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7560 }
7561 return rc;
7562}
7563
7564
7565#ifdef IEM_WITH_SETJMP
7566/**
7567 * Stores a data byte, longjmp on error.
7568 *
7569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7570 * @param iSegReg The index of the segment register to use for
7571 * this access. The base and limits are checked.
7572 * @param GCPtrMem The address of the guest memory.
7573 * @param u8Value The value to store.
7574 */
7575void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7576{
7577 /* The lazy approach for now... */
7578 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7579 *pu8Dst = u8Value;
7580 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7581}
7582#endif
7583
7584
7585/**
7586 * Stores a data word.
7587 *
7588 * @returns Strict VBox status code.
7589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7590 * @param iSegReg The index of the segment register to use for
7591 * this access. The base and limits are checked.
7592 * @param GCPtrMem The address of the guest memory.
7593 * @param u16Value The value to store.
7594 */
7595VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7596{
7597 /* The lazy approach for now... */
7598 uint16_t *pu16Dst;
7599 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7600 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7601 if (rc == VINF_SUCCESS)
7602 {
7603 *pu16Dst = u16Value;
7604 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7605 }
7606 return rc;
7607}
7608
7609
7610#ifdef IEM_WITH_SETJMP
7611/**
7612 * Stores a data word, longjmp on error.
7613 *
7614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7615 * @param iSegReg The index of the segment register to use for
7616 * this access. The base and limits are checked.
7617 * @param GCPtrMem The address of the guest memory.
7618 * @param u16Value The value to store.
7619 */
7620void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7621{
7622 /* The lazy approach for now... */
7623 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7624 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7625 *pu16Dst = u16Value;
7626 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7627}
7628#endif
7629
7630
7631/**
7632 * Stores a data dword.
7633 *
7634 * @returns Strict VBox status code.
7635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7636 * @param iSegReg The index of the segment register to use for
7637 * this access. The base and limits are checked.
7638 * @param GCPtrMem The address of the guest memory.
7639 * @param u32Value The value to store.
7640 */
7641VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7642{
7643 /* The lazy approach for now... */
7644 uint32_t *pu32Dst;
7645 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7646 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7647 if (rc == VINF_SUCCESS)
7648 {
7649 *pu32Dst = u32Value;
7650 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7651 }
7652 return rc;
7653}
7654
7655
7656#ifdef IEM_WITH_SETJMP
7657/**
7658 * Stores a data dword, longjmp on error.
7659 *
7661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7662 * @param iSegReg The index of the segment register to use for
7663 * this access. The base and limits are checked.
7664 * @param GCPtrMem The address of the guest memory.
7665 * @param u32Value The value to store.
7666 */
7667void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7668{
7669 /* The lazy approach for now... */
7670 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7671 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7672 *pu32Dst = u32Value;
7673 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7674}
7675#endif
7676
7677
7678/**
7679 * Stores a data qword.
7680 *
7681 * @returns Strict VBox status code.
7682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7683 * @param iSegReg The index of the segment register to use for
7684 * this access. The base and limits are checked.
7685 * @param GCPtrMem The address of the guest memory.
7686 * @param u64Value The value to store.
7687 */
7688VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7689{
7690 /* The lazy approach for now... */
7691 uint64_t *pu64Dst;
7692 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7693 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7694 if (rc == VINF_SUCCESS)
7695 {
7696 *pu64Dst = u64Value;
7697 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7698 }
7699 return rc;
7700}
7701
7702
7703#ifdef IEM_WITH_SETJMP
7704/**
7705 * Stores a data qword, longjmp on error.
7706 *
7707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7708 * @param iSegReg The index of the segment register to use for
7709 * this access. The base and limits are checked.
7710 * @param GCPtrMem The address of the guest memory.
7711 * @param u64Value The value to store.
7712 */
7713void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7714{
7715 /* The lazy approach for now... */
7716 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7717 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7718 *pu64Dst = u64Value;
7719 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7720}
7721#endif
7722
7723
7724/**
7725 * Stores a data dqword.
7726 *
7727 * @returns Strict VBox status code.
7728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7729 * @param iSegReg The index of the segment register to use for
7730 * this access. The base and limits are checked.
7731 * @param GCPtrMem The address of the guest memory.
7732 * @param u128Value The value to store.
7733 */
7734VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7735{
7736 /* The lazy approach for now... */
7737 PRTUINT128U pu128Dst;
7738 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7739 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7740 if (rc == VINF_SUCCESS)
7741 {
7742 pu128Dst->au64[0] = u128Value.au64[0];
7743 pu128Dst->au64[1] = u128Value.au64[1];
7744 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7745 }
7746 return rc;
7747}
7748
7749
7750#ifdef IEM_WITH_SETJMP
7751/**
7752 * Stores a data dqword, longjmp on error.
7753 *
7754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7755 * @param iSegReg The index of the segment register to use for
7756 * this access. The base and limits are checked.
7757 * @param GCPtrMem The address of the guest memory.
7758 * @param u128Value The value to store.
7759 */
7760void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7761{
7762 /* The lazy approach for now... */
7763 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7764 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7765 pu128Dst->au64[0] = u128Value.au64[0];
7766 pu128Dst->au64[1] = u128Value.au64[1];
7767 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7768}
7769#endif
7770
7771
7772/**
7773 * Stores a data dqword, SSE aligned.
7774 *
7775 * @returns Strict VBox status code.
7776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7777 * @param iSegReg The index of the segment register to use for
7778 * this access. The base and limits are checked.
7779 * @param GCPtrMem The address of the guest memory.
7780 * @param u128Value The value to store.
7781 */
7782VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7783{
7784 /* The lazy approach for now... */
7785 PRTUINT128U pu128Dst;
7786 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7787 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7788 if (rc == VINF_SUCCESS)
7789 {
7790 pu128Dst->au64[0] = u128Value.au64[0];
7791 pu128Dst->au64[1] = u128Value.au64[1];
7792 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7793 }
7794 return rc;
7795}
7796
7797
7798#ifdef IEM_WITH_SETJMP
7799/**
7800 * Stores a data dqword, SSE aligned, longjmp on error.
7801 *
7803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7804 * @param iSegReg The index of the segment register to use for
7805 * this access. The base and limits are checked.
7806 * @param GCPtrMem The address of the guest memory.
7807 * @param u128Value The value to store.
7808 */
7809void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7810 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7811{
7812 /* The lazy approach for now... */
7813 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7814 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7815 pu128Dst->au64[0] = u128Value.au64[0];
7816 pu128Dst->au64[1] = u128Value.au64[1];
7817 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7818}
7819#endif
7820
7821
7822/**
7823 * Stores a data oword (octo word).
7824 *
7825 * @returns Strict VBox status code.
7826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7827 * @param iSegReg The index of the segment register to use for
7828 * this access. The base and limits are checked.
7829 * @param GCPtrMem The address of the guest memory.
7830 * @param pu256Value Pointer to the value to store.
7831 */
7832VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7833{
7834 /* The lazy approach for now... */
7835 PRTUINT256U pu256Dst;
7836 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7837 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7838 if (rc == VINF_SUCCESS)
7839 {
7840 pu256Dst->au64[0] = pu256Value->au64[0];
7841 pu256Dst->au64[1] = pu256Value->au64[1];
7842 pu256Dst->au64[2] = pu256Value->au64[2];
7843 pu256Dst->au64[3] = pu256Value->au64[3];
7844 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7845 }
7846 return rc;
7847}
7848
7849
7850#ifdef IEM_WITH_SETJMP
7851/**
7852 * Stores a data oword (octo word), longjmp on error.
7853 *
7854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7855 * @param iSegReg The index of the segment register to use for
7856 * this access. The base and limits are checked.
7857 * @param GCPtrMem The address of the guest memory.
7858 * @param pu256Value Pointer to the value to store.
7859 */
7860void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7861{
7862 /* The lazy approach for now... */
7863 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7864 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7865 pu256Dst->au64[0] = pu256Value->au64[0];
7866 pu256Dst->au64[1] = pu256Value->au64[1];
7867 pu256Dst->au64[2] = pu256Value->au64[2];
7868 pu256Dst->au64[3] = pu256Value->au64[3];
7869 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7870}
7871#endif
7872
7873
7874/**
7875 * Stores a data oword (octo word), AVX aligned; raises \#GP(0) if not aligned.
7876 *
7877 * @returns Strict VBox status code.
7878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7879 * @param iSegReg The index of the segment register to use for
7880 * this access. The base and limits are checked.
7881 * @param GCPtrMem The address of the guest memory.
7882 * @param pu256Value Pointer to the value to store.
7883 */
7884VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7885{
7886 /* The lazy approach for now... */
7887 PRTUINT256U pu256Dst;
7888 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7889 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7890 if (rc == VINF_SUCCESS)
7891 {
7892 pu256Dst->au64[0] = pu256Value->au64[0];
7893 pu256Dst->au64[1] = pu256Value->au64[1];
7894 pu256Dst->au64[2] = pu256Value->au64[2];
7895 pu256Dst->au64[3] = pu256Value->au64[3];
7896 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7897 }
7898 return rc;
7899}
7900
7901
7902#ifdef IEM_WITH_SETJMP
7903/**
7904 * Stores a data oword (octo word), AVX aligned, longjmp on error.
7905 *
7907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7908 * @param iSegReg The index of the segment register to use for
7909 * this access. The base and limits are checked.
7910 * @param GCPtrMem The address of the guest memory.
7911 * @param pu256Value Pointer to the value to store.
7912 */
7913void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7914 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7915{
7916 /* The lazy approach for now... */
7917 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7918 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7919 pu256Dst->au64[0] = pu256Value->au64[0];
7920 pu256Dst->au64[1] = pu256Value->au64[1];
7921 pu256Dst->au64[2] = pu256Value->au64[2];
7922 pu256Dst->au64[3] = pu256Value->au64[3];
7923 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7924}
7925#endif
7926
7927
7928/**
7929 * Stores a descriptor register (sgdt, sidt).
7930 *
7931 * @returns Strict VBox status code.
7932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7933 * @param cbLimit The limit.
7934 * @param GCPtrBase The base address.
7935 * @param iSegReg The index of the segment register to use for
7936 * this access. The base and limits are checked.
7937 * @param GCPtrMem The address of the guest memory.
7938 */
7939VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7940{
7941 /*
7942     * The SIDT and SGDT instructions actually store the data using two
7943     * independent writes (see bs3CpuBasic2_sidt_sgdt_One).  The instructions
7944     * do not respond to opsize prefixes.
7945 */
7946 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7947 if (rcStrict == VINF_SUCCESS)
7948 {
7949 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7950 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7951 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7952 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7953 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7954 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7955 else
7956 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7957 }
7958 return rcStrict;
7959}
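/* Illustrative summary of the layout written above (not from the original
   source):
     +0: 16-bit limit
     +2: base address - a dword in 16-bit and 32-bit code (with the top byte
         forced to 0xff on 286 and earlier targets), a qword in 64-bit code. */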
7960
7961
7962/**
7963 * Pushes a word onto the stack.
7964 *
7965 * @returns Strict VBox status code.
7966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7967 * @param u16Value The value to push.
7968 */
7969VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7970{
7971    /* Decrement the stack pointer. */
7972 uint64_t uNewRsp;
7973 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7974
7975 /* Write the word the lazy way. */
7976 uint16_t *pu16Dst;
7977 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7978 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7979 if (rc == VINF_SUCCESS)
7980 {
7981 *pu16Dst = u16Value;
7982 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7983 }
7984
7985    /* Commit the new RSP value unless an access handler made trouble. */
7986 if (rc == VINF_SUCCESS)
7987 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7988
7989 return rc;
7990}
7991
7992
7993/**
7994 * Pushes a dword onto the stack.
7995 *
7996 * @returns Strict VBox status code.
7997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7998 * @param u32Value The value to push.
7999 */
8000VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8001{
8002    /* Decrement the stack pointer. */
8003 uint64_t uNewRsp;
8004 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8005
8006 /* Write the dword the lazy way. */
8007 uint32_t *pu32Dst;
8008 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8009 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8010 if (rc == VINF_SUCCESS)
8011 {
8012 *pu32Dst = u32Value;
8013 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8014 }
8015
8016    /* Commit the new RSP value unless an access handler made trouble. */
8017 if (rc == VINF_SUCCESS)
8018 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8019
8020 return rc;
8021}
8022
8023
8024/**
8025 * Pushes a dword segment register value onto the stack.
8026 *
8027 * @returns Strict VBox status code.
8028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8029 * @param u32Value The value to push.
8030 */
8031VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8032{
8033    /* Decrement the stack pointer. */
8034 uint64_t uNewRsp;
8035 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8036
8037    /* The Intel docs talk about zero extending the selector register
8038       value.  My actual Intel CPU here might be zero extending the value,
8039       but it still only writes the lower word... */
8040 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8041     * happens when crossing an electric page boundary, is the high word checked
8042 * for write accessibility or not? Probably it is. What about segment limits?
8043 * It appears this behavior is also shared with trap error codes.
8044 *
8045     * Docs indicate the behavior changed maybe in the Pentium or Pentium Pro.  Check
8046     * on ancient hardware to see when it actually changed. */
8047 uint16_t *pu16Dst;
8048 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8049 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8050 if (rc == VINF_SUCCESS)
8051 {
8052 *pu16Dst = (uint16_t)u32Value;
8053 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8054 }
8055
8056    /* Commit the new RSP value unless an access handler made trouble. */
8057 if (rc == VINF_SUCCESS)
8058 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8059
8060 return rc;
8061}
8062
8063
8064/**
8065 * Pushes a qword onto the stack.
8066 *
8067 * @returns Strict VBox status code.
8068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8069 * @param u64Value The value to push.
8070 */
8071VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8072{
8073    /* Decrement the stack pointer. */
8074 uint64_t uNewRsp;
8075 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8076
8077    /* Write the qword the lazy way. */
8078 uint64_t *pu64Dst;
8079 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8080 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8081 if (rc == VINF_SUCCESS)
8082 {
8083 *pu64Dst = u64Value;
8084 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8085 }
8086
8087    /* Commit the new RSP value unless an access handler made trouble. */
8088 if (rc == VINF_SUCCESS)
8089 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8090
8091 return rc;
8092}
8093
8094
8095/**
8096 * Pops a word from the stack.
8097 *
8098 * @returns Strict VBox status code.
8099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8100 * @param pu16Value Where to store the popped value.
8101 */
8102VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8103{
8104 /* Increment the stack pointer. */
8105 uint64_t uNewRsp;
8106 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8107
8108    /* Read the word the lazy way. */
8109 uint16_t const *pu16Src;
8110 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8111 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8112 if (rc == VINF_SUCCESS)
8113 {
8114 *pu16Value = *pu16Src;
8115 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8116
8117 /* Commit the new RSP value. */
8118 if (rc == VINF_SUCCESS)
8119 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8120 }
8121
8122 return rc;
8123}
8124
8125
8126/**
8127 * Pops a dword from the stack.
8128 *
8129 * @returns Strict VBox status code.
8130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8131 * @param pu32Value Where to store the popped value.
8132 */
8133VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8134{
8135 /* Increment the stack pointer. */
8136 uint64_t uNewRsp;
8137 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8138
8139    /* Read the dword the lazy way. */
8140 uint32_t const *pu32Src;
8141 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8142 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8143 if (rc == VINF_SUCCESS)
8144 {
8145 *pu32Value = *pu32Src;
8146 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8147
8148 /* Commit the new RSP value. */
8149 if (rc == VINF_SUCCESS)
8150 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8151 }
8152
8153 return rc;
8154}
8155
8156
8157/**
8158 * Pops a qword from the stack.
8159 *
8160 * @returns Strict VBox status code.
8161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8162 * @param pu64Value Where to store the popped value.
8163 */
8164VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8165{
8166 /* Increment the stack pointer. */
8167 uint64_t uNewRsp;
8168 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8169
8170    /* Read the qword the lazy way. */
8171 uint64_t const *pu64Src;
8172 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8173 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8174 if (rc == VINF_SUCCESS)
8175 {
8176 *pu64Value = *pu64Src;
8177 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8178
8179 /* Commit the new RSP value. */
8180 if (rc == VINF_SUCCESS)
8181 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8182 }
8183
8184 return rc;
8185}
8186
8187
8188/**
8189 * Pushes a word onto the stack, using a temporary stack pointer.
8190 *
8191 * @returns Strict VBox status code.
8192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8193 * @param u16Value The value to push.
8194 * @param pTmpRsp Pointer to the temporary stack pointer.
8195 */
8196VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8197{
8198    /* Decrement the stack pointer. */
8199 RTUINT64U NewRsp = *pTmpRsp;
8200 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8201
8202 /* Write the word the lazy way. */
8203 uint16_t *pu16Dst;
8204 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8205 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8206 if (rc == VINF_SUCCESS)
8207 {
8208 *pu16Dst = u16Value;
8209 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8210 }
8211
8212    /* Commit the new RSP value unless an access handler made trouble. */
8213 if (rc == VINF_SUCCESS)
8214 *pTmpRsp = NewRsp;
8215
8216 return rc;
8217}
8218
8219
8220/**
8221 * Pushes a dword onto the stack, using a temporary stack pointer.
8222 *
8223 * @returns Strict VBox status code.
8224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8225 * @param u32Value The value to push.
8226 * @param pTmpRsp Pointer to the temporary stack pointer.
8227 */
8228VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8229{
8230    /* Decrement the stack pointer. */
8231 RTUINT64U NewRsp = *pTmpRsp;
8232 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8233
8234    /* Write the dword the lazy way. */
8235 uint32_t *pu32Dst;
8236 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8237 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8238 if (rc == VINF_SUCCESS)
8239 {
8240 *pu32Dst = u32Value;
8241 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8242 }
8243
8244    /* Commit the new RSP value unless an access handler made trouble. */
8245 if (rc == VINF_SUCCESS)
8246 *pTmpRsp = NewRsp;
8247
8248 return rc;
8249}
8250
8251
8252/**
8253 * Pushes a qword onto the stack, using a temporary stack pointer.
8254 *
8255 * @returns Strict VBox status code.
8256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8257 * @param u64Value The value to push.
8258 * @param pTmpRsp Pointer to the temporary stack pointer.
8259 */
8260VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8261{
8262    /* Decrement the stack pointer. */
8263 RTUINT64U NewRsp = *pTmpRsp;
8264 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8265
8266    /* Write the qword the lazy way. */
8267 uint64_t *pu64Dst;
8268 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8269 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8270 if (rc == VINF_SUCCESS)
8271 {
8272 *pu64Dst = u64Value;
8273 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8274 }
8275
8276    /* Commit the new RSP value unless an access handler made trouble. */
8277 if (rc == VINF_SUCCESS)
8278 *pTmpRsp = NewRsp;
8279
8280 return rc;
8281}
8282
8283
8284/**
8285 * Pops a word from the stack, using a temporary stack pointer.
8286 *
8287 * @returns Strict VBox status code.
8288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8289 * @param pu16Value Where to store the popped value.
8290 * @param pTmpRsp Pointer to the temporary stack pointer.
8291 */
8292VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8293{
8294 /* Increment the stack pointer. */
8295 RTUINT64U NewRsp = *pTmpRsp;
8296 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8297
8298    /* Read the word the lazy way. */
8299 uint16_t const *pu16Src;
8300 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8301 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8302 if (rc == VINF_SUCCESS)
8303 {
8304 *pu16Value = *pu16Src;
8305 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8306
8307 /* Commit the new RSP value. */
8308 if (rc == VINF_SUCCESS)
8309 *pTmpRsp = NewRsp;
8310 }
8311
8312 return rc;
8313}
8314
8315
8316/**
8317 * Pops a dword from the stack, using a temporary stack pointer.
8318 *
8319 * @returns Strict VBox status code.
8320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8321 * @param pu32Value Where to store the popped value.
8322 * @param pTmpRsp Pointer to the temporary stack pointer.
8323 */
8324VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8325{
8326 /* Increment the stack pointer. */
8327 RTUINT64U NewRsp = *pTmpRsp;
8328 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8329
8330    /* Read the dword the lazy way. */
8331 uint32_t const *pu32Src;
8332 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8333 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8334 if (rc == VINF_SUCCESS)
8335 {
8336 *pu32Value = *pu32Src;
8337 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8338
8339 /* Commit the new RSP value. */
8340 if (rc == VINF_SUCCESS)
8341 *pTmpRsp = NewRsp;
8342 }
8343
8344 return rc;
8345}
8346
8347
8348/**
8349 * Pops a qword from the stack, using a temporary stack pointer.
8350 *
8351 * @returns Strict VBox status code.
8352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8353 * @param pu64Value Where to store the popped value.
8354 * @param pTmpRsp Pointer to the temporary stack pointer.
8355 */
8356VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8357{
8358 /* Increment the stack pointer. */
8359 RTUINT64U NewRsp = *pTmpRsp;
8360 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8361
8362    /* Read the qword the lazy way. */
8363 uint64_t const *pu64Src;
8364 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8365 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8366 if (rcStrict == VINF_SUCCESS)
8367 {
8368 *pu64Value = *pu64Src;
8369 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8370
8371 /* Commit the new RSP value. */
8372 if (rcStrict == VINF_SUCCESS)
8373 *pTmpRsp = NewRsp;
8374 }
8375
8376 return rcStrict;
8377}
8378
8379
8380/**
8381 * Begin a special stack push (used by interrupts, exceptions and such).
8382 *
8383 * This will raise \#SS or \#PF if appropriate.
8384 *
8385 * @returns Strict VBox status code.
8386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8387 * @param cbMem The number of bytes to push onto the stack.
8388 * @param cbAlign The alignment mask (7, 3, 1).
8389 * @param ppvMem Where to return the pointer to the stack memory.
8390 * As with the other memory functions this could be
8391 * direct access or bounce buffered access, so
8392 * don't commit register until the commit call
8393 *                      don't commit the register until the commit call
8394 * @param puNewRsp Where to return the new RSP value. This must be
8395 * passed unchanged to
8396 * iemMemStackPushCommitSpecial().
8397 */
8398VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8399 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8400{
8401 Assert(cbMem < UINT8_MAX);
8402 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8403 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8404 IEM_ACCESS_STACK_W, cbAlign);
8405}
8406
8407
8408/**
8409 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8410 *
8411 * This will update the rSP.
8412 *
8413 * @returns Strict VBox status code.
8414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8415 * @param pvMem The pointer returned by
8416 * iemMemStackPushBeginSpecial().
8417 * @param uNewRsp The new RSP value returned by
8418 * iemMemStackPushBeginSpecial().
8419 */
8420VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8421{
8422 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8423 if (rcStrict == VINF_SUCCESS)
8424 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8425 return rcStrict;
8426}
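/* Minimal usage sketch for the begin/commit pair above (not from the original
   source; the size, alignment mask and variable names are illustrative):

       void        *pvStackMem;
       uint64_t     uNewRsp;
       VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvStackMem, &uNewRsp);
       if (rcStrict != VINF_SUCCESS)
           return rcStrict;
       ... fill in the 8 mapped bytes ...
       rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackMem, uNewRsp);
*/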
8427
8428
8429/**
8430 * Begin a special stack pop (used by iret, retf and such).
8431 *
8432 * This will raise \#SS or \#PF if appropriate.
8433 *
8434 * @returns Strict VBox status code.
8435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8436 * @param cbMem The number of bytes to pop from the stack.
8437 * @param cbAlign The alignment mask (7, 3, 1).
8438 * @param ppvMem Where to return the pointer to the stack memory.
8439 * @param puNewRsp Where to return the new RSP value. This must be
8440 * assigned to CPUMCTX::rsp manually some time
8441 * after iemMemStackPopDoneSpecial() has been
8442 * called.
8443 */
8444VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8445 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8446{
8447 Assert(cbMem < UINT8_MAX);
8448 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8449 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8450}
8451
8452
8453/**
8454 * Continue a special stack pop (used by iret and retf), for the purpose of
8455 * retrieving a new stack pointer.
8456 *
8457 * This will raise \#SS or \#PF if appropriate.
8458 *
8459 * @returns Strict VBox status code.
8460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8461 * @param off Offset from the top of the stack. This is zero
8462 * except in the retf case.
8463 * @param cbMem The number of bytes to pop from the stack.
8464 * @param ppvMem Where to return the pointer to the stack memory.
8465 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8466 * return this because all use of this function is
8467 * to retrieve a new value and anything we return
8468 * here would be discarded.)
8469 */
8470VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8471 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8472{
8473 Assert(cbMem < UINT8_MAX);
8474
8475    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8476 RTGCPTR GCPtrTop;
8477 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8478 GCPtrTop = uCurNewRsp;
8479 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8480 GCPtrTop = (uint32_t)uCurNewRsp;
8481 else
8482 GCPtrTop = (uint16_t)uCurNewRsp;
8483
8484 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8485 0 /* checked in iemMemStackPopBeginSpecial */);
8486}
8487
8488
8489/**
8490 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8491 * iemMemStackPopContinueSpecial).
8492 *
8493 * The caller will manually commit the rSP.
8494 *
8495 * @returns Strict VBox status code.
8496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8497 * @param pvMem The pointer returned by
8498 * iemMemStackPopBeginSpecial() or
8499 * iemMemStackPopContinueSpecial().
8500 */
8501VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8502{
8503 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8504}
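/* Minimal usage sketch for the pop begin/done pair above (not from the
   original source; the size, alignment mask and variable names are
   illustrative):

       void const  *pvStackMem;
       uint64_t     uNewRsp;
       VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 4, 3, &pvStackMem, &uNewRsp);
       if (rcStrict != VINF_SUCCESS)
           return rcStrict;
       uint32_t const uValue = *(uint32_t const *)pvStackMem;
       rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvStackMem);
       if (rcStrict == VINF_SUCCESS)
           pVCpu->cpum.GstCtx.rsp = uNewRsp;

   As documented above, RSP is committed manually by the caller. */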
8505
8506
8507/**
8508 * Fetches a system table byte.
8509 *
8510 * @returns Strict VBox status code.
8511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8512 * @param pbDst Where to return the byte.
8513 * @param iSegReg The index of the segment register to use for
8514 * this access. The base and limits are checked.
8515 * @param GCPtrMem The address of the guest memory.
8516 */
8517VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8518{
8519 /* The lazy approach for now... */
8520 uint8_t const *pbSrc;
8521 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8522 if (rc == VINF_SUCCESS)
8523 {
8524 *pbDst = *pbSrc;
8525 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8526 }
8527 return rc;
8528}
8529
8530
8531/**
8532 * Fetches a system table word.
8533 *
8534 * @returns Strict VBox status code.
8535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8536 * @param pu16Dst Where to return the word.
8537 * @param iSegReg The index of the segment register to use for
8538 * this access. The base and limits are checked.
8539 * @param GCPtrMem The address of the guest memory.
8540 */
8541VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8542{
8543 /* The lazy approach for now... */
8544 uint16_t const *pu16Src;
8545 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8546 if (rc == VINF_SUCCESS)
8547 {
8548 *pu16Dst = *pu16Src;
8549 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8550 }
8551 return rc;
8552}
8553
8554
8555/**
8556 * Fetches a system table dword.
8557 *
8558 * @returns Strict VBox status code.
8559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8560 * @param pu32Dst Where to return the dword.
8561 * @param iSegReg The index of the segment register to use for
8562 * this access. The base and limits are checked.
8563 * @param GCPtrMem The address of the guest memory.
8564 */
8565VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8566{
8567 /* The lazy approach for now... */
8568 uint32_t const *pu32Src;
8569 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8570 if (rc == VINF_SUCCESS)
8571 {
8572 *pu32Dst = *pu32Src;
8573 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8574 }
8575 return rc;
8576}
8577
8578
8579/**
8580 * Fetches a system table qword.
8581 *
8582 * @returns Strict VBox status code.
8583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8584 * @param pu64Dst Where to return the qword.
8585 * @param iSegReg The index of the segment register to use for
8586 * this access. The base and limits are checked.
8587 * @param GCPtrMem The address of the guest memory.
8588 */
8589VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8590{
8591 /* The lazy approach for now... */
8592 uint64_t const *pu64Src;
8593 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8594 if (rc == VINF_SUCCESS)
8595 {
8596 *pu64Dst = *pu64Src;
8597 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8598 }
8599 return rc;
8600}
8601
8602
8603/**
8604 * Fetches a descriptor table entry with caller specified error code.
8605 *
8606 * @returns Strict VBox status code.
8607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8608 * @param pDesc Where to return the descriptor table entry.
8609 * @param   uSel            The selector whose table entry to fetch.
8610 * @param uXcpt The exception to raise on table lookup error.
8611 * @param uErrorCode The error code associated with the exception.
8612 */
8613static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8614 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8615{
8616 AssertPtr(pDesc);
8617 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8618
8619 /** @todo did the 286 require all 8 bytes to be accessible? */
8620 /*
8621 * Get the selector table base and check bounds.
8622 */
8623 RTGCPTR GCPtrBase;
8624 if (uSel & X86_SEL_LDT)
8625 {
8626 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8627 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8628 {
8629 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8630 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8631 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8632 uErrorCode, 0);
8633 }
8634
8635 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8636 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8637 }
8638 else
8639 {
8640 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8641 {
8642 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8643 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8644 uErrorCode, 0);
8645 }
8646 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8647 }
8648
8649 /*
8650 * Read the legacy descriptor and maybe the long mode extensions if
8651 * required.
8652 */
8653 VBOXSTRICTRC rcStrict;
8654 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8655 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8656 else
8657 {
8658 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8659 if (rcStrict == VINF_SUCCESS)
8660 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8661 if (rcStrict == VINF_SUCCESS)
8662 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8663 if (rcStrict == VINF_SUCCESS)
8664 pDesc->Legacy.au16[3] = 0;
8665 else
8666 return rcStrict;
8667 }
8668
8669 if (rcStrict == VINF_SUCCESS)
8670 {
8671 if ( !IEM_IS_LONG_MODE(pVCpu)
8672 || pDesc->Legacy.Gen.u1DescType)
8673 pDesc->Long.au64[1] = 0;
8674 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8675 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8676 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8677 else
8678 {
8679 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8680 /** @todo is this the right exception? */
8681 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8682 }
8683 }
8684 return rcStrict;
8685}
8686
8687
8688/**
8689 * Fetches a descriptor table entry.
8690 *
8691 * @returns Strict VBox status code.
8692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8693 * @param pDesc Where to return the descriptor table entry.
8694 * @param   uSel            The selector whose table entry to fetch.
8695 * @param uXcpt The exception to raise on table lookup error.
8696 */
8697VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8698{
8699 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8700}
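/* Minimal usage sketch (not from the original source; uNewCs is an
   illustrative selector variable):

       IEMSELDESC   Desc;
       VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewCs, X86_XCPT_GP);
       if (rcStrict != VINF_SUCCESS)
           return rcStrict;
*/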
8701
8702
8703/**
8704 * Marks the selector descriptor as accessed (only non-system descriptors).
8705 *
8706 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8707 * will therefore skip the limit checks.
8708 *
8709 * @returns Strict VBox status code.
8710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8711 * @param uSel The selector.
8712 */
8713VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8714{
8715 /*
8716 * Get the selector table base and calculate the entry address.
8717 */
8718 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8719 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8720 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8721 GCPtr += uSel & X86_SEL_MASK;
8722
8723 /*
8724 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8725 * ugly stuff to avoid this. This will make sure it's an atomic access
8726     * as well as more or less remove any question about 8-bit or 32-bit accesses.
8727 */
8728 VBOXSTRICTRC rcStrict;
8729 uint32_t volatile *pu32;
8730 if ((GCPtr & 3) == 0)
8731 {
8732        /* The normal case, map the 32 bits around the accessed bit (40). */
8733 GCPtr += 2 + 2;
8734 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8735 if (rcStrict != VINF_SUCCESS)
8736 return rcStrict;
8737        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8738 }
8739 else
8740 {
8741 /* The misaligned GDT/LDT case, map the whole thing. */
8742 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8743 if (rcStrict != VINF_SUCCESS)
8744 return rcStrict;
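        /* The accessed bit is bit 40 of the 8-byte descriptor; the cases below
           pick a byte offset and bit index that land on that same bit while
           keeping the pointer suitably aligned for ASMAtomicBitSet. */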
8745 switch ((uintptr_t)pu32 & 3)
8746 {
8747 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8748 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8749 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8750 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8751 }
8752 }
8753
8754 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8755}
8756
8757/** @} */
8758
8759/** @name Opcode Helpers.
8760 * @{
8761 */
8762
8763/**
8764 * Calculates the effective address of a ModR/M memory operand.
8765 *
8766 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8767 *
8768 * @return Strict VBox status code.
8769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8770 * @param bRm The ModRM byte.
8771 * @param cbImmAndRspOffset - First byte: The size of any immediate
8772 * following the effective address opcode bytes
8773 * (only for RIP relative addressing).
8774 * - Second byte: RSP displacement (for POP [ESP]).
8775 * @param pGCPtrEff Where to return the effective address.
8776 */
8777VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8778{
8779 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8780# define SET_SS_DEF() \
8781 do \
8782 { \
8783 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8784 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8785 } while (0)
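/* Worked example (illustrative, not from the original source): in 32-bit code,
   bRm=0x04 (mod=0, rm=4) pulls in a SIB byte; with bSib=0x88 that decodes to
   base=EAX, index=ECX, scale=4 and no displacement, so the code below returns
   EAX + ECX*4 as the effective address. */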
8786
8787 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8788 {
8789/** @todo Check the effective address size crap! */
8790 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8791 {
8792 uint16_t u16EffAddr;
8793
8794 /* Handle the disp16 form with no registers first. */
8795 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8796 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8797 else
8798 {
8799                /* Get the displacement. */
8800 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8801 {
8802 case 0: u16EffAddr = 0; break;
8803 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8804 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8805 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8806 }
8807
8808 /* Add the base and index registers to the disp. */
8809 switch (bRm & X86_MODRM_RM_MASK)
8810 {
8811 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8812 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8813 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8814 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8815 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8816 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8817 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8818 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8819 }
8820 }
8821
8822 *pGCPtrEff = u16EffAddr;
8823 }
8824 else
8825 {
8826 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8827 uint32_t u32EffAddr;
8828
8829 /* Handle the disp32 form with no registers first. */
8830 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8831 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8832 else
8833 {
8834 /* Get the register (or SIB) value. */
8835 switch ((bRm & X86_MODRM_RM_MASK))
8836 {
8837 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8838 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8839 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8840 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8841 case 4: /* SIB */
8842 {
8843 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8844
8845 /* Get the index and scale it. */
8846 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8847 {
8848 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8849 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8850 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8851 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8852 case 4: u32EffAddr = 0; /*none */ break;
8853 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8854 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8855 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8856 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8857 }
8858 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8859
8860 /* add base */
8861 switch (bSib & X86_SIB_BASE_MASK)
8862 {
8863 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8864 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8865 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8866 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8867 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8868 case 5:
8869 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8870 {
8871 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8872 SET_SS_DEF();
8873 }
8874 else
8875 {
8876 uint32_t u32Disp;
8877 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8878 u32EffAddr += u32Disp;
8879 }
8880 break;
8881 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8882 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8883 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8884 }
8885 break;
8886 }
8887 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8888 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8889 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8890 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8891 }
8892
8893 /* Get and add the displacement. */
8894 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8895 {
8896 case 0:
8897 break;
8898 case 1:
8899 {
8900 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8901 u32EffAddr += i8Disp;
8902 break;
8903 }
8904 case 2:
8905 {
8906 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8907 u32EffAddr += u32Disp;
8908 break;
8909 }
8910 default:
8911 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8912 }
8913
8914 }
8915 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8916 *pGCPtrEff = u32EffAddr;
8917 else
8918 {
8919 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8920 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8921 }
8922 }
8923 }
8924 else
8925 {
8926 uint64_t u64EffAddr;
8927
8928 /* Handle the rip+disp32 form with no registers first. */
8929 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8930 {
8931 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8932 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8933 }
8934 else
8935 {
8936 /* Get the register (or SIB) value. */
8937 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8938 {
8939 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8940 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8941 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8942 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8943 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8944 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8945 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8946 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8947 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8948 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8949 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8950 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8951 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8952 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8953 /* SIB */
8954 case 4:
8955 case 12:
8956 {
8957 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8958
8959 /* Get the index and scale it. */
8960 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8961 {
8962 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8963 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8964 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8965 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8966 case 4: u64EffAddr = 0; /*none */ break;
8967 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8968 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8969 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8970 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8971 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8972 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8973 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8974 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8975 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8976 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8977 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8978 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8979 }
8980 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8981
8982 /* add base */
8983 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8984 {
8985 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8986 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8987 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8988 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8989 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8990 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8991 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8992 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8993 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8994 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8995 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8996 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8997 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8998 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8999 /* complicated encodings */
9000 case 5:
9001 case 13:
9002 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9003 {
9004 if (!pVCpu->iem.s.uRexB)
9005 {
9006 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9007 SET_SS_DEF();
9008 }
9009 else
9010 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9011 }
9012 else
9013 {
9014 uint32_t u32Disp;
9015 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9016 u64EffAddr += (int32_t)u32Disp;
9017 }
9018 break;
9019 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9020 }
9021 break;
9022 }
9023 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9024 }
9025
9026 /* Get and add the displacement. */
9027 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9028 {
9029 case 0:
9030 break;
9031 case 1:
9032 {
9033 int8_t i8Disp;
9034 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9035 u64EffAddr += i8Disp;
9036 break;
9037 }
9038 case 2:
9039 {
9040 uint32_t u32Disp;
9041 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9042 u64EffAddr += (int32_t)u32Disp;
9043 break;
9044 }
9045 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9046 }
9047
9048 }
9049
9050 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9051 *pGCPtrEff = u64EffAddr;
9052 else
9053 {
9054 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9055 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9056 }
9057 }
9058
9059 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9060 return VINF_SUCCESS;
9061}
9062
9063
9064#ifdef IEM_WITH_SETJMP
9065/**
9066 * Calculates the effective address of a ModR/M memory operand.
9067 *
9068 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9069 *
9070 * May longjmp on internal error.
9071 *
9072 * @returns The effective address.
9073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9074 * @param bRm The ModRM byte.
9075 * @param cbImmAndRspOffset - First byte: The size of any immediate
9076 * following the effective address opcode bytes
9077 * (only for RIP relative addressing).
9078 * - Second byte: RSP displacement (for POP [ESP]).
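 *
 * Illustrative sketch (not taken from an actual caller; the byte values are
 * made up) of how the packed @a cbImmAndRspOffset argument can be put
 * together before invoking the helper:
 * @code
 *      uint32_t const cbImm          = 4;                       // e.g. a trailing imm32
 *      uint32_t const offRspAdjust   = 8;                       // e.g. POP [ESP] stack delta
 *      uint32_t const cbImmAndRspOff = cbImm | (offRspAdjust << 8);
 *      RTGCPTR  const GCPtrEff       = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, cbImmAndRspOff);
 * @endcode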
9079 */
9080RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9081{
9082 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9083# define SET_SS_DEF() \
9084 do \
9085 { \
9086 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9087 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9088 } while (0)
9089
9090 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9091 {
9092/** @todo Check the effective address size crap! */
9093 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9094 {
9095 uint16_t u16EffAddr;
9096
9097 /* Handle the disp16 form with no registers first. */
9098 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9099 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9100 else
9101 {
9102 /* Get the displacement. */
9103 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9104 {
9105 case 0: u16EffAddr = 0; break;
9106 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9107 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9108 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9109 }
9110
9111 /* Add the base and index registers to the disp. */
9112 switch (bRm & X86_MODRM_RM_MASK)
9113 {
9114 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9115 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9116 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9117 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9118 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9119 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9120 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9121 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9122 }
9123 }
9124
9125 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9126 return u16EffAddr;
9127 }
9128
9129 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9130 uint32_t u32EffAddr;
9131
9132 /* Handle the disp32 form with no registers first. */
9133 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9134 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9135 else
9136 {
9137 /* Get the register (or SIB) value. */
9138 switch ((bRm & X86_MODRM_RM_MASK))
9139 {
9140 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9141 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9142 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9143 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9144 case 4: /* SIB */
9145 {
9146 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9147
9148 /* Get the index and scale it. */
9149 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9150 {
9151 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9152 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9153 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9154 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9155 case 4: u32EffAddr = 0; /*none */ break;
9156 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9157 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9158 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9159 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9160 }
9161 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9162
9163 /* add base */
9164 switch (bSib & X86_SIB_BASE_MASK)
9165 {
9166 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9167 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9168 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9169 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9170 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9171 case 5:
9172 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9173 {
9174 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9175 SET_SS_DEF();
9176 }
9177 else
9178 {
9179 uint32_t u32Disp;
9180 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9181 u32EffAddr += u32Disp;
9182 }
9183 break;
9184 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9185 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9186 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9187 }
9188 break;
9189 }
9190 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9191 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9192 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9193 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9194 }
9195
9196 /* Get and add the displacement. */
9197 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9198 {
9199 case 0:
9200 break;
9201 case 1:
9202 {
9203 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9204 u32EffAddr += i8Disp;
9205 break;
9206 }
9207 case 2:
9208 {
9209 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9210 u32EffAddr += u32Disp;
9211 break;
9212 }
9213 default:
9214 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9215 }
9216 }
9217
9218 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9219 {
9220 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9221 return u32EffAddr;
9222 }
9223 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9224 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9225 return u32EffAddr & UINT16_MAX;
9226 }
9227
9228 uint64_t u64EffAddr;
9229
9230 /* Handle the rip+disp32 form with no registers first. */
9231 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9232 {
9233 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9234 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9235 }
9236 else
9237 {
9238 /* Get the register (or SIB) value. */
9239 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9240 {
9241 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9242 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9243 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9244 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9245 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9246 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9247 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9248 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9249 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9250 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9251 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9252 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9253 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9254 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9255 /* SIB */
9256 case 4:
9257 case 12:
9258 {
9259 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9260
9261 /* Get the index and scale it. */
9262 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9263 {
9264 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9265 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9266 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9267 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9268 case 4: u64EffAddr = 0; /*none */ break;
9269 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9270 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9271 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9272 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9273 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9274 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9275 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9276 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9277 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9278 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9279 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9280 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9281 }
9282 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9283
9284 /* add base */
9285 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9286 {
9287 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9288 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9289 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9290 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9291 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9292 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9293 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9294 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9295 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9296 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9297 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9298 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9299 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9300 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9301 /* complicated encodings */
9302 case 5:
9303 case 13:
9304 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9305 {
9306 if (!pVCpu->iem.s.uRexB)
9307 {
9308 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9309 SET_SS_DEF();
9310 }
9311 else
9312 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9313 }
9314 else
9315 {
9316 uint32_t u32Disp;
9317 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9318 u64EffAddr += (int32_t)u32Disp;
9319 }
9320 break;
9321 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9322 }
9323 break;
9324 }
9325 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9326 }
9327
9328 /* Get and add the displacement. */
9329 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9330 {
9331 case 0:
9332 break;
9333 case 1:
9334 {
9335 int8_t i8Disp;
9336 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9337 u64EffAddr += i8Disp;
9338 break;
9339 }
9340 case 2:
9341 {
9342 uint32_t u32Disp;
9343 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9344 u64EffAddr += (int32_t)u32Disp;
9345 break;
9346 }
9347 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9348 }
9349
9350 }
9351
9352 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9353 {
9354 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9355 return u64EffAddr;
9356 }
9357 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9358 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9359 return u64EffAddr & UINT32_MAX;
9360}
9361#endif /* IEM_WITH_SETJMP */
9362
9363
9364/**
9365 * Calculates the effective address of a ModR/M memory operand, extended version
9366 * for use in the recompilers.
9367 *
9368 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9369 *
9370 * @returns Strict VBox status code.
9371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9372 * @param bRm The ModRM byte.
9373 * @param cbImmAndRspOffset - First byte: The size of any immediate
9374 * following the effective address opcode bytes
9375 * (only for RIP relative addressing).
9376 * - Second byte: RSP displacement (for POP [ESP]).
9377 * @param pGCPtrEff Where to return the effective address.
9378 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9379 * SIB byte (bits 39:32).
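 *
 * Minimal sketch (hypothetical caller, not copied from the recompiler) of how
 * the packed @a puInfo value can be taken apart again:
 * @code
 *      RTGCPTR      GCPtrEff = 0;
 *      uint64_t     uInfo    = 0;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0 /*cbImmAndRspOffset*/, &GCPtrEff, &uInfo);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const u32Disp = (uint32_t)uInfo;            // bits 31:0
 *          uint8_t  const bSib    = (uint8_t)(uInfo >> 32);     // bits 39:32
 *      }
 * @endcode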
9380 */
9381VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9382{
9383 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
9384# define SET_SS_DEF() \
9385 do \
9386 { \
9387 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9388 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9389 } while (0)
9390
9391 uint64_t uInfo;
9392 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9393 {
9394/** @todo Check the effective address size crap! */
9395 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9396 {
9397 uint16_t u16EffAddr;
9398
9399 /* Handle the disp16 form with no registers first. */
9400 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9401 {
9402 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9403 uInfo = u16EffAddr;
9404 }
9405 else
9406 {
9407 /* Get the displacement. */
9408 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9409 {
9410 case 0: u16EffAddr = 0; break;
9411 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9412 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9413 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9414 }
9415 uInfo = u16EffAddr;
9416
9417 /* Add the base and index registers to the disp. */
9418 switch (bRm & X86_MODRM_RM_MASK)
9419 {
9420 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9421 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9422 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9423 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9424 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9425 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9426 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9427 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9428 }
9429 }
9430
9431 *pGCPtrEff = u16EffAddr;
9432 }
9433 else
9434 {
9435 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9436 uint32_t u32EffAddr;
9437
9438 /* Handle the disp32 form with no registers first. */
9439 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9440 {
9441 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9442 uInfo = u32EffAddr;
9443 }
9444 else
9445 {
9446 /* Get the register (or SIB) value. */
9447 uInfo = 0;
9448 switch ((bRm & X86_MODRM_RM_MASK))
9449 {
9450 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9451 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9452 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9453 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9454 case 4: /* SIB */
9455 {
9456 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9457 uInfo = (uint64_t)bSib << 32;
9458
9459 /* Get the index and scale it. */
9460 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9461 {
9462 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9463 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9464 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9465 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9466 case 4: u32EffAddr = 0; /*none */ break;
9467 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9468 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9469 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9470 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9471 }
9472 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9473
9474 /* add base */
9475 switch (bSib & X86_SIB_BASE_MASK)
9476 {
9477 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9478 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9479 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9480 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9481 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9482 case 5:
9483 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9484 {
9485 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9486 SET_SS_DEF();
9487 }
9488 else
9489 {
9490 uint32_t u32Disp;
9491 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9492 u32EffAddr += u32Disp;
9493 uInfo |= u32Disp;
9494 }
9495 break;
9496 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9497 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9498 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9499 }
9500 break;
9501 }
9502 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9503 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9504 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9505 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9506 }
9507
9508 /* Get and add the displacement. */
9509 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9510 {
9511 case 0:
9512 break;
9513 case 1:
9514 {
9515 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9516 u32EffAddr += i8Disp;
9517 uInfo |= (uint32_t)(int32_t)i8Disp;
9518 break;
9519 }
9520 case 2:
9521 {
9522 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9523 u32EffAddr += u32Disp;
9524 uInfo |= (uint32_t)u32Disp;
9525 break;
9526 }
9527 default:
9528 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9529 }
9530
9531 }
9532 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9533 *pGCPtrEff = u32EffAddr;
9534 else
9535 {
9536 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9537 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9538 }
9539 }
9540 }
9541 else
9542 {
9543 uint64_t u64EffAddr;
9544
9545 /* Handle the rip+disp32 form with no registers first. */
9546 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9547 {
9548 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9549 uInfo = (uint32_t)u64EffAddr;
9550 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9551 }
9552 else
9553 {
9554 /* Get the register (or SIB) value. */
9555 uInfo = 0;
9556 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9557 {
9558 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9559 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9560 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9561 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9562 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9563 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9564 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9565 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9566 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9567 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9568 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9569 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9570 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9571 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9572 /* SIB */
9573 case 4:
9574 case 12:
9575 {
9576 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9577 uInfo = (uint64_t)bSib << 32;
9578
9579 /* Get the index and scale it. */
9580 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9581 {
9582 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9583 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9584 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9585 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9586 case 4: u64EffAddr = 0; /*none */ break;
9587 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9588 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9589 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9590 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9591 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9592 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9593 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9594 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9595 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9596 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9597 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9598 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9599 }
9600 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9601
9602 /* add base */
9603 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9604 {
9605 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9606 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9607 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9608 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9609 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9610 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9611 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9612 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9613 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9614 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9615 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9616 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9617 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9618 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9619 /* complicated encodings */
9620 case 5:
9621 case 13:
9622 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9623 {
9624 if (!pVCpu->iem.s.uRexB)
9625 {
9626 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9627 SET_SS_DEF();
9628 }
9629 else
9630 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9631 }
9632 else
9633 {
9634 uint32_t u32Disp;
9635 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9636 u64EffAddr += (int32_t)u32Disp;
9637 uInfo |= u32Disp;
9638 }
9639 break;
9640 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9641 }
9642 break;
9643 }
9644 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9645 }
9646
9647 /* Get and add the displacement. */
9648 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9649 {
9650 case 0:
9651 break;
9652 case 1:
9653 {
9654 int8_t i8Disp;
9655 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9656 u64EffAddr += i8Disp;
9657 uInfo |= (uint32_t)(int32_t)i8Disp;
9658 break;
9659 }
9660 case 2:
9661 {
9662 uint32_t u32Disp;
9663 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9664 u64EffAddr += (int32_t)u32Disp;
9665 uInfo |= u32Disp;
9666 break;
9667 }
9668 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9669 }
9670
9671 }
9672
9673 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9674 *pGCPtrEff = u64EffAddr;
9675 else
9676 {
9677 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9678 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9679 }
9680 }
9681 *puInfo = uInfo;
9682
9683 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9684 return VINF_SUCCESS;
9685}
9686
9687
9688#ifdef IEM_WITH_SETJMP
9689/**
9690 * Calculates the effective address of a ModR/M memory operand, extended version
9691 * for use in the recompilers.
9692 *
9693 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9694 *
9695 * May longjmp on internal error.
9696 *
9697 * @returns The effective address.
9698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9699 * @param bRm The ModRM byte.
9700 * @param cbImmAndRspOffset - First byte: The size of any immediate
9701 * following the effective address opcode bytes
9702 * (only for RIP relative addressing).
9703 * - Second byte: RSP displacement (for POP [ESP]).
9704 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9705 * SIB byte (bits 39:32).
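 *
 * Call-shape sketch (hypothetical caller), mirroring iemOpHlpCalcRmEffAddrEx
 * but with the address returned directly and errors reported via longjmp:
 * @code
 *      uint64_t uInfo = 0;
 *      RTGCPTR const GCPtrEff = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, bRm, 0 /*cbImmAndRspOffset*/, &uInfo);
 *      // Bits 31:0 of uInfo hold the raw displacement, bits 39:32 the SIB byte (if any).
 * @endcode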
9706 */
9707RTGCPTR iemOpHlpCalcRmEffAddrJmpEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, uint64_t *puInfo) IEM_NOEXCEPT_MAY_LONGJMP
9708{
9709 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9710# define SET_SS_DEF() \
9711 do \
9712 { \
9713 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9714 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9715 } while (0)
9716
9717 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9718 {
9719/** @todo Check the effective address size crap! */
9720 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9721 {
9722 uint16_t u16EffAddr;
9723
9724 /* Handle the disp16 form with no registers first. */
9725 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9726 {
9727 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9728 *puInfo = u16EffAddr;
9729 }
9730 else
9731 {
9732 /* Get the displacement. */
9733 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9734 {
9735 case 0: u16EffAddr = 0; break;
9736 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9737 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9738 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9739 }
9740 *puInfo = u16EffAddr;
9741
9742 /* Add the base and index registers to the disp. */
9743 switch (bRm & X86_MODRM_RM_MASK)
9744 {
9745 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9746 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9747 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9748 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9749 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9750 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9751 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9752 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9753 }
9754 }
9755
9756 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16 uInfo=%#RX64\n", u16EffAddr, *puInfo));
9757 return u16EffAddr;
9758 }
9759
9760 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9761 uint32_t u32EffAddr;
9762 uint64_t uInfo;
9763
9764 /* Handle the disp32 form with no registers first. */
9765 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9766 {
9767 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9768 uInfo = u32EffAddr;
9769 }
9770 else
9771 {
9772 /* Get the register (or SIB) value. */
9773 uInfo = 0;
9774 switch ((bRm & X86_MODRM_RM_MASK))
9775 {
9776 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9777 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9778 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9779 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9780 case 4: /* SIB */
9781 {
9782 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9783 uInfo = (uint64_t)bSib << 32;
9784
9785 /* Get the index and scale it. */
9786 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9787 {
9788 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9789 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9790 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9791 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9792 case 4: u32EffAddr = 0; /*none */ break;
9793 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9794 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9795 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9796 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9797 }
9798 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9799
9800 /* add base */
9801 switch (bSib & X86_SIB_BASE_MASK)
9802 {
9803 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9804 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9805 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9806 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9807 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9808 case 5:
9809 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9810 {
9811 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9812 SET_SS_DEF();
9813 }
9814 else
9815 {
9816 uint32_t u32Disp;
9817 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9818 u32EffAddr += u32Disp;
9819 uInfo |= u32Disp;
9820 }
9821 break;
9822 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9823 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9824 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9825 }
9826 break;
9827 }
9828 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9829 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9830 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9831 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9832 }
9833
9834 /* Get and add the displacement. */
9835 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9836 {
9837 case 0:
9838 break;
9839 case 1:
9840 {
9841 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9842 u32EffAddr += i8Disp;
9843 uInfo |= (uint32_t)(int32_t)i8Disp;
9844 break;
9845 }
9846 case 2:
9847 {
9848 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9849 u32EffAddr += u32Disp;
9850 uInfo |= u32Disp;
9851 break;
9852 }
9853 default:
9854 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9855 }
9856 }
9857
9858 *puInfo = uInfo;
9859 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32 uInfo=%#RX64\n", u32EffAddr, uInfo));
9860 return u32EffAddr;
9861 }
9862
9863 uint64_t u64EffAddr;
9864 uint64_t uInfo;
9865
9866 /* Handle the rip+disp32 form with no registers first. */
9867 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9868 {
9869 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9870 uInfo = (uint32_t)u64EffAddr;
9871 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9872 }
9873 else
9874 {
9875 /* Get the register (or SIB) value. */
9876 uInfo = 0;
9877 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9878 {
9879 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9880 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9881 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9882 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9883 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9884 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9885 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9886 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9887 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9888 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9889 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9890 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9891 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9892 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9893 /* SIB */
9894 case 4:
9895 case 12:
9896 {
9897 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9898 uInfo = (uint64_t)bSib << 32;
9899
9900 /* Get the index and scale it. */
9901 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9902 {
9903 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9904 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9905 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9906 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9907 case 4: u64EffAddr = 0; /*none */ break;
9908 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9909 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9910 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9911 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9912 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9913 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9914 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9915 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9916 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9917 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9918 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9919 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9920 }
9921 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9922
9923 /* add base */
9924 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9925 {
9926 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9927 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9928 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9929 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9930 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9931 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9932 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9933 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9934 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9935 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9936 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9937 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9938 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9939 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9940 /* complicated encodings */
9941 case 5:
9942 case 13:
9943 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9944 {
9945 if (!pVCpu->iem.s.uRexB)
9946 {
9947 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9948 SET_SS_DEF();
9949 }
9950 else
9951 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9952 }
9953 else
9954 {
9955 uint32_t u32Disp;
9956 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9957 u64EffAddr += (int32_t)u32Disp;
9958 uInfo |= u32Disp;
9959 }
9960 break;
9961 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9962 }
9963 break;
9964 }
9965 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9966 }
9967
9968 /* Get and add the displacement. */
9969 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9970 {
9971 case 0:
9972 break;
9973 case 1:
9974 {
9975 int8_t i8Disp;
9976 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9977 u64EffAddr += i8Disp;
9978 uInfo |= (uint32_t)(int32_t)i8Disp;
9979 break;
9980 }
9981 case 2:
9982 {
9983 uint32_t u32Disp;
9984 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9985 u64EffAddr += (int32_t)u32Disp;
9986 uInfo |= u32Disp;
9987 break;
9988 }
9989 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9990 }
9991
9992 }
9993
9994 *puInfo = uInfo;
9995 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9996 {
9997 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv uInfo=%#RX64\n", u64EffAddr, uInfo));
9998 return u64EffAddr;
9999 }
10000 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
10001 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv uInfo=%#RX64\n", u64EffAddr & UINT32_MAX, uInfo));
10002 return u64EffAddr & UINT32_MAX;
10003}
10004#endif /* IEM_WITH_SETJMP */
10005
10006/** @} */
10007
10008
10009#ifdef LOG_ENABLED
10010/**
10011 * Logs the current instruction.
10012 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10013 * @param fSameCtx Set if we have the same context information as the VMM,
10014 * clear if we may have already executed an instruction in
10015 * our debug context. When clear, we assume IEMCPU holds
10016 * valid CPU mode info.
10017 *
10018 * The @a fSameCtx parameter is now misleading and obsolete.
10019 * @param pszFunction The IEM function doing the execution.
10020 */
10021static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
10022{
10023# ifdef IN_RING3
10024 if (LogIs2Enabled())
10025 {
10026 char szInstr[256];
10027 uint32_t cbInstr = 0;
10028 if (fSameCtx)
10029 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10030 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10031 szInstr, sizeof(szInstr), &cbInstr);
10032 else
10033 {
10034 uint32_t fFlags = 0;
10035 switch (pVCpu->iem.s.enmCpuMode)
10036 {
10037 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10038 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10039 case IEMMODE_16BIT:
10040 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
10041 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10042 else
10043 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10044 break;
10045 }
10046 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
10047 szInstr, sizeof(szInstr), &cbInstr);
10048 }
10049
10050 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
10051 Log2(("**** %s\n"
10052 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10053 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10054 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10055 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10056 " %s\n"
10057 , pszFunction,
10058 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
10059 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
10060 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
10061 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
10062 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10063 szInstr));
10064
10065 if (LogIs3Enabled())
10066 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
10067 }
10068 else
10069# endif
10070 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
10071 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
10072 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
10073}
10074#endif /* LOG_ENABLED */
10075
10076
10077#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10078/**
10079 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
10080 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
10081 *
10082 * @returns Modified rcStrict.
10083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10084 * @param rcStrict The instruction execution status.
10085 */
10086static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
10087{
10088 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
10089 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
10090 {
10091 /* VMX preemption timer takes priority over NMI-window exits. */
10092 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
10093 {
10094 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
10095 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
10096 }
10097 /*
10098 * Check remaining intercepts.
10099 *
10100 * NMI-window and Interrupt-window VM-exits.
10101 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
10102 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
10103 *
10104 * See Intel spec. 26.7.6 "NMI-Window Exiting".
10105 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
10106 */
10107 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
10108 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10109 && !TRPMHasTrap(pVCpu))
10110 {
10111 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
10112 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
10113 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
10114 {
10115 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
10116 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
10117 }
10118 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
10119 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
10120 {
10121 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
10122 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
10123 }
10124 }
10125 }
10126 /* TPR-below threshold/APIC write has the highest priority. */
10127 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
10128 {
10129 rcStrict = iemVmxApicWriteEmulation(pVCpu);
10130 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
10131 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
10132 }
10133 /* MTF takes priority over VMX-preemption timer. */
10134 else
10135 {
10136 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
10137 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
10138 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
10139 }
10140 return rcStrict;
10141}
10142#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10143
10144
10145/** @def IEM_TRY_SETJMP
10146 * Wrapper around setjmp / try, hiding all the ugly differences.
10147 *
10148 * @note Use with extreme care as this is a fragile macro.
10149 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
10150 * @param a_rcTarget The variable that should receive the status code in case
10151 * of a longjmp/throw.
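 *
 * Typical usage shape (sketch only; it mirrors the pattern used by
 * iemExecOneInner further down in this file):
 * @code
 *      VBOXSTRICTRC rcStrict;
 *      IEM_TRY_SETJMP(pVCpu, rcStrict)
 *      {
 *          // work that may longjmp / throw
 *      }
 *      IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
 *      {
 *          // rcStrict now holds the thrown status code
 *      }
 *      IEM_CATCH_LONGJMP_END(pVCpu);
 * @endcode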
10152 */
10153/** @def IEM_TRY_SETJMP_AGAIN
10154 * For when setjmp / try is used again in the same variable scope as a previous
10155 * IEM_TRY_SETJMP invocation.
10156 */
10157/** @def IEM_CATCH_LONGJMP_BEGIN
10158 * Start wrapper for catch / setjmp-else.
10159 *
10160 * This will set up a scope.
10161 *
10162 * @note Use with extreme care as this is a fragile macro.
10163 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
10164 * @param a_rcTarget The variable that should receive the status code in case
10165 * of a longjmp/throw.
10166 */
10167/** @def IEM_CATCH_LONGJMP_END
10168 * End wrapper for catch / setjmp-else.
10169 *
10170 * This will close the scope set up by IEM_CATCH_LONGJMP_BEGIN and clean up the
10171 * state.
10172 *
10173 * @note Use with extreme care as this is a fragile macro.
10174 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
10175 */
10176#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
10177# ifdef IEM_WITH_THROW_CATCH
10178# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
10179 a_rcTarget = VINF_SUCCESS; \
10180 try
10181# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
10182 IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
10183# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
10184 catch (int rcThrown) \
10185 { \
10186 a_rcTarget = rcThrown
10187# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
10188 } \
10189 ((void)0)
10190# else /* !IEM_WITH_THROW_CATCH */
10191# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
10192 jmp_buf JmpBuf; \
10193 jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
10194 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
10195 if ((a_rcTarget = setjmp(JmpBuf)) == 0)
10196# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
10197 pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
10198 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
10199 if ((a_rcTarget = setjmp(JmpBuf)) == 0)
10200# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
10201 else \
10202 { \
10203 ((void)0)
10204# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
10205 } \
10206 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
10207# endif /* !IEM_WITH_THROW_CATCH */
10208#endif /* IEM_WITH_SETJMP */
10209
10210
10211/**
10212 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10213 * IEMExecOneWithPrefetchedByPC.
10214 *
10215 * Similar code is found in IEMExecLots.
10216 *
10217 * @returns Strict VBox status code.
10218 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10219 * @param fExecuteInhibit If set, execute the instruction following CLI,
10220 * POP SS and MOV SS,GR.
10221 * @param pszFunction The calling function name.
10222 */
10223DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
10224{
10225 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10226 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10227 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10228 RT_NOREF_PV(pszFunction);
10229
10230#ifdef IEM_WITH_SETJMP
10231 VBOXSTRICTRC rcStrict;
10232 IEM_TRY_SETJMP(pVCpu, rcStrict)
10233 {
10234 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10235 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10236 }
10237 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10238 {
10239 pVCpu->iem.s.cLongJumps++;
10240 }
10241 IEM_CATCH_LONGJMP_END(pVCpu);
10242#else
10243 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10244 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10245#endif
10246 if (rcStrict == VINF_SUCCESS)
10247 pVCpu->iem.s.cInstructions++;
10248 if (pVCpu->iem.s.cActiveMappings > 0)
10249 {
10250 Assert(rcStrict != VINF_SUCCESS);
10251 iemMemRollback(pVCpu);
10252 }
10253 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10254 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10255 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10256
10257//#ifdef DEBUG
10258// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
10259//#endif
10260
10261#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10262 /*
10263 * Perform any VMX nested-guest instruction boundary actions.
10264 *
10265 * If any of these causes a VM-exit, we must skip executing the next
10266 * instruction (would run into stale page tables). A VM-exit makes sure
10267 * there is no interrupt-inhibition, so that should ensure we don't go
10268 * on to try executing the next instruction. Clearing fExecuteInhibit is
10269 * problematic because of the setjmp/longjmp clobbering above.
10270 */
10271 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10272 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
10273 || rcStrict != VINF_SUCCESS)
10274 { /* likely */ }
10275 else
10276 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10277#endif
10278
10279 /* Execute the next instruction as well if a cli, pop ss or
10280 mov ss, Gr has just completed successfully. */
10281 if ( fExecuteInhibit
10282 && rcStrict == VINF_SUCCESS
10283 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
10284 {
10285 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
10286 if (rcStrict == VINF_SUCCESS)
10287 {
10288#ifdef LOG_ENABLED
10289 iemLogCurInstr(pVCpu, false, pszFunction);
10290#endif
10291#ifdef IEM_WITH_SETJMP
10292 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
10293 {
10294 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10295 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10296 }
10297 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10298 {
10299 pVCpu->iem.s.cLongJumps++;
10300 }
10301 IEM_CATCH_LONGJMP_END(pVCpu);
10302#else
10303 IEM_OPCODE_GET_FIRST_U8(&b);
10304 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10305#endif
10306 if (rcStrict == VINF_SUCCESS)
10307 {
10308 pVCpu->iem.s.cInstructions++;
10309#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10310 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10311 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
10312 { /* likely */ }
10313 else
10314 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10315#endif
10316 }
10317 if (pVCpu->iem.s.cActiveMappings > 0)
10318 {
10319 Assert(rcStrict != VINF_SUCCESS);
10320 iemMemRollback(pVCpu);
10321 }
10322 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10323 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10324 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10325 }
10326 else if (pVCpu->iem.s.cActiveMappings > 0)
10327 iemMemRollback(pVCpu);
10328 /** @todo drop this after we bake this change into RIP advancing. */
10329 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10330 }
10331
10332 /*
10333 * Return value fiddling, statistics and sanity assertions.
10334 */
10335 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10336
10337 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10338 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10339 return rcStrict;
10340}
10341
10342
10343/**
10344 * Execute one instruction.
10345 *
10346 * @returns Strict VBox status code.
10347 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10348 */
10349VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10350{
10351 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10352#ifdef LOG_ENABLED
10353 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10354#endif
10355
10356 /*
10357 * Do the decoding and emulation.
10358 */
10359 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10360 if (rcStrict == VINF_SUCCESS)
10361 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10362 else if (pVCpu->iem.s.cActiveMappings > 0)
10363 iemMemRollback(pVCpu);
10364
10365 if (rcStrict != VINF_SUCCESS)
10366 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10367 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10368 return rcStrict;
10369}
10370
10371
10372VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10373{
10374 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10375 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10376 if (rcStrict == VINF_SUCCESS)
10377 {
10378 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10379 if (pcbWritten)
10380 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10381 }
10382 else if (pVCpu->iem.s.cActiveMappings > 0)
10383 iemMemRollback(pVCpu);
10384
10385 return rcStrict;
10386}
10387
10388
10389VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10390 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10391{
10392 VBOXSTRICTRC rcStrict;
10393 if ( cbOpcodeBytes
10394 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10395 {
10396 iemInitDecoder(pVCpu, false, false);
10397#ifdef IEM_WITH_CODE_TLB
10398 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10399 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10400 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10401 pVCpu->iem.s.offCurInstrStart = 0;
10402 pVCpu->iem.s.offInstrNextByte = 0;
10403#else
10404 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10405 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10406#endif
10407 rcStrict = VINF_SUCCESS;
10408 }
10409 else
10410 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10411 if (rcStrict == VINF_SUCCESS)
10412 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10413 else if (pVCpu->iem.s.cActiveMappings > 0)
10414 iemMemRollback(pVCpu);
10415
10416 return rcStrict;
10417}
10418
10419
10420VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10421{
10422 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10423 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
10424 if (rcStrict == VINF_SUCCESS)
10425 {
10426 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10427 if (pcbWritten)
10428 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10429 }
10430 else if (pVCpu->iem.s.cActiveMappings > 0)
10431 iemMemRollback(pVCpu);
10432
10433 return rcStrict;
10434}
10435
10436
10437VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10438 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10439{
10440 VBOXSTRICTRC rcStrict;
10441 if ( cbOpcodeBytes
10442 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10443 {
10444 iemInitDecoder(pVCpu, true, false);
10445#ifdef IEM_WITH_CODE_TLB
10446 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10447 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10448 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10449 pVCpu->iem.s.offCurInstrStart = 0;
10450 pVCpu->iem.s.offInstrNextByte = 0;
10451#else
10452 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10453 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10454#endif
10455 rcStrict = VINF_SUCCESS;
10456 }
10457 else
10458 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
10459 if (rcStrict == VINF_SUCCESS)
10460 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10461 else if (pVCpu->iem.s.cActiveMappings > 0)
10462 iemMemRollback(pVCpu);
10463
10464 return rcStrict;
10465}
10466
10467
10468/**
10469 * For handling split cacheline lock operations when the host has split-lock
10470 * detection enabled.
10471 *
10472 * This will cause the interpreter to disregard the lock prefix and implicit
10473 * locking (xchg).
10474 *
10475 * @returns Strict VBox status code.
10476 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10477 */
10478VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10479{
10480 /*
10481 * Do the decoding and emulation.
10482 */
10483 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
10484 if (rcStrict == VINF_SUCCESS)
10485 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10486 else if (pVCpu->iem.s.cActiveMappings > 0)
10487 iemMemRollback(pVCpu);
10488
10489 if (rcStrict != VINF_SUCCESS)
10490 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10491 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10492 return rcStrict;
10493}
10494
10495
10496VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10497{
10498 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10499 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10500
10501 /*
10502 * See if there is an interrupt pending in TRPM, inject it if we can.
10503 */
10504 /** @todo What if we are injecting an exception and not an interrupt? Is that
10505 * possible here? For now we assert it is indeed only an interrupt. */
10506 if (!TRPMHasTrap(pVCpu))
10507 { /* likely */ }
10508 else
10509 {
10510 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10511 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10512 {
10513 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10514#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10515 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10516 if (fIntrEnabled)
10517 {
10518 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10519 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10520 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10521 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10522 else
10523 {
10524 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10525 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10526 }
10527 }
10528#else
10529 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10530#endif
10531 if (fIntrEnabled)
10532 {
10533 uint8_t u8TrapNo;
10534 TRPMEVENT enmType;
10535 uint32_t uErrCode;
10536 RTGCPTR uCr2;
10537 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10538 AssertRC(rc2);
10539 Assert(enmType == TRPM_HARDWARE_INT);
10540 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10541
10542 TRPMResetTrap(pVCpu);
10543
10544#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10545 /* Injecting an event may cause a VM-exit. */
10546 if ( rcStrict != VINF_SUCCESS
10547 && rcStrict != VINF_IEM_RAISED_XCPT)
10548 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10549#else
10550 NOREF(rcStrict);
10551#endif
10552 }
10553 }
10554 }
10555
10556 /*
10557 * Initial decoder init w/ prefetch, then setup setjmp.
10558 */
10559 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10560 if (rcStrict == VINF_SUCCESS)
10561 {
10562#ifdef IEM_WITH_SETJMP
10563 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10564 IEM_TRY_SETJMP(pVCpu, rcStrict)
10565#endif
10566 {
10567 /*
10568 * The run loop. We limit ourselves to 4096 instructions right now.
10569 */
10570 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10571 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10572 for (;;)
10573 {
10574 /*
10575 * Log the state.
10576 */
10577#ifdef LOG_ENABLED
10578 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10579#endif
10580
10581 /*
10582 * Do the decoding and emulation.
10583 */
10584 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10585 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10586#ifdef VBOX_STRICT
10587 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10588#endif
10589 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10590 {
10591 Assert(pVCpu->iem.s.cActiveMappings == 0);
10592 pVCpu->iem.s.cInstructions++;
10593
10594#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10595 /* Perform any VMX nested-guest instruction boundary actions. */
10596 uint64_t fCpu = pVCpu->fLocalForcedActions;
10597 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10598 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10599 { /* likely */ }
10600 else
10601 {
10602 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10603 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10604 fCpu = pVCpu->fLocalForcedActions;
10605 else
10606 {
10607 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10608 break;
10609 }
10610 }
10611#endif
10612 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10613 {
10614#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10615 uint64_t fCpu = pVCpu->fLocalForcedActions;
10616#endif
10617 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10618 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10619 | VMCPU_FF_TLB_FLUSH
10620 | VMCPU_FF_UNHALT );
10621
10622 if (RT_LIKELY( ( !fCpu
10623 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10624 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10625 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10626 {
10627 if (cMaxInstructionsGccStupidity-- > 0)
10628 {
10629                                /* Poll timers every now and then according to the caller's specs. */
10630 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10631 || !TMTimerPollBool(pVM, pVCpu))
10632 {
10633 Assert(pVCpu->iem.s.cActiveMappings == 0);
10634 iemReInitDecoder(pVCpu);
10635 continue;
10636 }
10637 }
10638 }
10639 }
10640 Assert(pVCpu->iem.s.cActiveMappings == 0);
10641 }
10642 else if (pVCpu->iem.s.cActiveMappings > 0)
10643 iemMemRollback(pVCpu);
10644 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10645 break;
10646 }
10647 }
10648#ifdef IEM_WITH_SETJMP
10649 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10650 {
10651 if (pVCpu->iem.s.cActiveMappings > 0)
10652 iemMemRollback(pVCpu);
10653# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10654 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10655# endif
10656 pVCpu->iem.s.cLongJumps++;
10657 }
10658 IEM_CATCH_LONGJMP_END(pVCpu);
10659#endif
10660
10661 /*
10662 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10663 */
10664 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10665 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10666 }
10667 else
10668 {
10669 if (pVCpu->iem.s.cActiveMappings > 0)
10670 iemMemRollback(pVCpu);
10671
10672#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10673 /*
10674 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10675 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10676 */
10677 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10678#endif
10679 }
10680
10681 /*
10682 * Maybe re-enter raw-mode and log.
10683 */
10684 if (rcStrict != VINF_SUCCESS)
10685 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10686 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10687 if (pcInstructions)
10688 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10689 return rcStrict;
10690}
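
/*
 * A minimal usage sketch for IEMExecLots, assuming an EMT caller; the wrapper
 * name and the limits are invented for illustration.  Note that cPollRate is
 * used as a mask, so the value passed must be a power of two minus one (the
 * assertion at the top of the function enforces this).
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC emR3ExampleIemBurst(PVMCPUCC pVCpu)
{
    /* Run up to 4096 instructions, polling timers roughly every 512 instructions. */
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    LogFlow(("emR3ExampleIemBurst: %u instructions executed -> %Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif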
10691
10692
10693/**
10694 * Interface used by EMExecuteExec, does exit statistics and limits.
10695 *
10696 * @returns Strict VBox status code.
10697 * @param pVCpu The cross context virtual CPU structure.
10698 * @param fWillExit To be defined.
10699 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10700 * @param cMaxInstructions Maximum number of instructions to execute.
10701 * @param cMaxInstructionsWithoutExits
10702 * The max number of instructions without exits.
10703 * @param pStats Where to return statistics.
10704 */
10705VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10706 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10707{
10708 NOREF(fWillExit); /** @todo define flexible exit crits */
10709
10710 /*
10711 * Initialize return stats.
10712 */
10713 pStats->cInstructions = 0;
10714 pStats->cExits = 0;
10715 pStats->cMaxExitDistance = 0;
10716 pStats->cReserved = 0;
10717
10718 /*
10719 * Initial decoder init w/ prefetch, then setup setjmp.
10720 */
10721 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10722 if (rcStrict == VINF_SUCCESS)
10723 {
10724#ifdef IEM_WITH_SETJMP
10725 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10726 IEM_TRY_SETJMP(pVCpu, rcStrict)
10727#endif
10728 {
10729#ifdef IN_RING0
10730 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10731#endif
10732 uint32_t cInstructionSinceLastExit = 0;
10733
10734 /*
10735 * The run loop. We limit ourselves to 4096 instructions right now.
10736 */
10737 PVM pVM = pVCpu->CTX_SUFF(pVM);
10738 for (;;)
10739 {
10740 /*
10741 * Log the state.
10742 */
10743#ifdef LOG_ENABLED
10744 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10745#endif
10746
10747 /*
10748 * Do the decoding and emulation.
10749 */
10750 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10751
10752 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10753 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10754
10755 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10756 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10757 {
10758 pStats->cExits += 1;
10759 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10760 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10761 cInstructionSinceLastExit = 0;
10762 }
10763
10764 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10765 {
10766 Assert(pVCpu->iem.s.cActiveMappings == 0);
10767 pVCpu->iem.s.cInstructions++;
10768 pStats->cInstructions++;
10769 cInstructionSinceLastExit++;
10770
10771#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10772 /* Perform any VMX nested-guest instruction boundary actions. */
10773 uint64_t fCpu = pVCpu->fLocalForcedActions;
10774 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10775 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10776 { /* likely */ }
10777 else
10778 {
10779 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10780 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10781 fCpu = pVCpu->fLocalForcedActions;
10782 else
10783 {
10784 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10785 break;
10786 }
10787 }
10788#endif
10789 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10790 {
10791#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10792 uint64_t fCpu = pVCpu->fLocalForcedActions;
10793#endif
10794 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10795 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10796 | VMCPU_FF_TLB_FLUSH
10797 | VMCPU_FF_UNHALT );
10798 if (RT_LIKELY( ( ( !fCpu
10799 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10800 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10801 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10802 || pStats->cInstructions < cMinInstructions))
10803 {
10804 if (pStats->cInstructions < cMaxInstructions)
10805 {
10806 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10807 {
10808#ifdef IN_RING0
10809 if ( !fCheckPreemptionPending
10810 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10811#endif
10812 {
10813 Assert(pVCpu->iem.s.cActiveMappings == 0);
10814 iemReInitDecoder(pVCpu);
10815 continue;
10816 }
10817#ifdef IN_RING0
10818 rcStrict = VINF_EM_RAW_INTERRUPT;
10819 break;
10820#endif
10821 }
10822 }
10823 }
10824 Assert(!(fCpu & VMCPU_FF_IEM));
10825 }
10826 Assert(pVCpu->iem.s.cActiveMappings == 0);
10827 }
10828 else if (pVCpu->iem.s.cActiveMappings > 0)
10829 iemMemRollback(pVCpu);
10830 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10831 break;
10832 }
10833 }
10834#ifdef IEM_WITH_SETJMP
10835 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10836 {
10837 if (pVCpu->iem.s.cActiveMappings > 0)
10838 iemMemRollback(pVCpu);
10839 pVCpu->iem.s.cLongJumps++;
10840 }
10841 IEM_CATCH_LONGJMP_END(pVCpu);
10842#endif
10843
10844 /*
10845 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10846 */
10847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10849 }
10850 else
10851 {
10852 if (pVCpu->iem.s.cActiveMappings > 0)
10853 iemMemRollback(pVCpu);
10854
10855#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10856 /*
10857 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10858 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10859 */
10860 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10861#endif
10862 }
10863
10864 /*
10865 * Maybe re-enter raw-mode and log.
10866 */
10867 if (rcStrict != VINF_SUCCESS)
10868 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10869 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10870 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10871 return rcStrict;
10872}
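
/*
 * A minimal usage sketch for IEMExecForExits, assuming an EMT caller; the
 * wrapper name and the limits are invented, while the statistics fields are
 * the ones initialized at the top of the function.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC emR3ExampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit: not yet defined*/, 64 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/, &Stats);
    Log(("emR3ExampleExecForExits: %u instructions, %u exits, max exit distance %u -> %Rrc\n",
         Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif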
10873
10874
10875/**
10876 * Injects a trap, fault, abort, software interrupt or external interrupt.
10877 *
10878 * The parameter list matches TRPMQueryTrapAll pretty closely.
10879 *
10880 * @returns Strict VBox status code.
10881 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10882 * @param u8TrapNo The trap number.
10883 * @param enmType What type is it (trap/fault/abort), software
10884 * interrupt or hardware interrupt.
10885 * @param uErrCode The error code if applicable.
10886 * @param uCr2 The CR2 value if applicable.
10887 * @param cbInstr The instruction length (only relevant for
10888 * software interrupts).
10889 */
10890VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10891 uint8_t cbInstr)
10892{
10893 iemInitDecoder(pVCpu, false, false);
10894#ifdef DBGFTRACE_ENABLED
10895 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10896 u8TrapNo, enmType, uErrCode, uCr2);
10897#endif
10898
10899 uint32_t fFlags;
10900 switch (enmType)
10901 {
10902 case TRPM_HARDWARE_INT:
10903 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10904 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10905 uErrCode = uCr2 = 0;
10906 break;
10907
10908 case TRPM_SOFTWARE_INT:
10909 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10910 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10911 uErrCode = uCr2 = 0;
10912 break;
10913
10914 case TRPM_TRAP:
10915 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10916 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10917 if (u8TrapNo == X86_XCPT_PF)
10918 fFlags |= IEM_XCPT_FLAGS_CR2;
10919 switch (u8TrapNo)
10920 {
10921 case X86_XCPT_DF:
10922 case X86_XCPT_TS:
10923 case X86_XCPT_NP:
10924 case X86_XCPT_SS:
10925 case X86_XCPT_PF:
10926 case X86_XCPT_AC:
10927 case X86_XCPT_GP:
10928 fFlags |= IEM_XCPT_FLAGS_ERR;
10929 break;
10930 }
10931 break;
10932
10933 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10934 }
10935
10936 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10937
10938 if (pVCpu->iem.s.cActiveMappings > 0)
10939 iemMemRollback(pVCpu);
10940
10941 return rcStrict;
10942}
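
/*
 * A minimal usage sketch for IEMInjectTrap; the vector, error code and
 * faulting address are invented for illustration.  The error code and CR2
 * arguments are only consumed for the exception types flagged in the switch
 * above.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC emR3ExampleInjectEvents(PVMCPUCC pVCpu)
{
    /* External (hardware) interrupt: the error code and CR2 arguments are ignored. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x20 /*u8TrapNo*/, TRPM_HARDWARE_INT,
                                          0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Page fault: both the error code and CR2 are delivered to the guest. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                         X86_TRAP_PF_RW /*uErrCode*/, 0x1000 /*uCr2*/, 0 /*cbInstr*/);
}
#endif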
10943
10944
10945/**
10946 * Injects the active TRPM event.
10947 *
10948 * @returns Strict VBox status code.
10949 * @param pVCpu The cross context virtual CPU structure.
10950 */
10951VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10952{
10953#ifndef IEM_IMPLEMENTS_TASKSWITCH
10954 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10955#else
10956 uint8_t u8TrapNo;
10957 TRPMEVENT enmType;
10958 uint32_t uErrCode;
10959 RTGCUINTPTR uCr2;
10960 uint8_t cbInstr;
10961 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10962 if (RT_FAILURE(rc))
10963 return rc;
10964
10965 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10966 * ICEBP \#DB injection as a special case. */
10967 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10968#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10969 if (rcStrict == VINF_SVM_VMEXIT)
10970 rcStrict = VINF_SUCCESS;
10971#endif
10972#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10973 if (rcStrict == VINF_VMX_VMEXIT)
10974 rcStrict = VINF_SUCCESS;
10975#endif
10976 /** @todo Are there any other codes that imply the event was successfully
10977 * delivered to the guest? See @bugref{6607}. */
10978 if ( rcStrict == VINF_SUCCESS
10979 || rcStrict == VINF_IEM_RAISED_XCPT)
10980 TRPMResetTrap(pVCpu);
10981
10982 return rcStrict;
10983#endif
10984}
10985
10986
10987VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10988{
10989 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10990 return VERR_NOT_IMPLEMENTED;
10991}
10992
10993
10994VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10995{
10996 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10997 return VERR_NOT_IMPLEMENTED;
10998}
10999
11000
11001/**
11002 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11003 *
11004 * This API ASSUMES that the caller has already verified that the guest code is
11005 * allowed to access the I/O port. (The I/O port is in the DX register in the
11006 * guest state.)
11007 *
11008 * @returns Strict VBox status code.
11009 * @param pVCpu The cross context virtual CPU structure.
11010 * @param cbValue The size of the I/O port access (1, 2, or 4).
11011 * @param enmAddrMode The addressing mode.
11012 * @param fRepPrefix Indicates whether a repeat prefix is used
11013 * (doesn't matter which for this instruction).
11014 * @param cbInstr The instruction length in bytes.
11015 * @param   iEffSeg     The effective segment register.
11016 * @param fIoChecked Whether the access to the I/O port has been
11017 * checked or not. It's typically checked in the
11018 * HM scenario.
11019 */
11020VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11021 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
11022{
11023 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11024 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11025
11026 /*
11027 * State init.
11028 */
11029 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11030
11031 /*
11032 * Switch orgy for getting to the right handler.
11033 */
11034 VBOXSTRICTRC rcStrict;
11035 if (fRepPrefix)
11036 {
11037 switch (enmAddrMode)
11038 {
11039 case IEMMODE_16BIT:
11040 switch (cbValue)
11041 {
11042 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11043 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11044 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11045 default:
11046 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11047 }
11048 break;
11049
11050 case IEMMODE_32BIT:
11051 switch (cbValue)
11052 {
11053 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11054 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11055 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11056 default:
11057 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11058 }
11059 break;
11060
11061 case IEMMODE_64BIT:
11062 switch (cbValue)
11063 {
11064 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11065 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11066 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11067 default:
11068 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11069 }
11070 break;
11071
11072 default:
11073 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11074 }
11075 }
11076 else
11077 {
11078 switch (enmAddrMode)
11079 {
11080 case IEMMODE_16BIT:
11081 switch (cbValue)
11082 {
11083 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11084 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11085 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11086 default:
11087 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11088 }
11089 break;
11090
11091 case IEMMODE_32BIT:
11092 switch (cbValue)
11093 {
11094 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11095 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11096 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11097 default:
11098 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11099 }
11100 break;
11101
11102 case IEMMODE_64BIT:
11103 switch (cbValue)
11104 {
11105 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11106 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11107 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11108 default:
11109 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11110 }
11111 break;
11112
11113 default:
11114 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11115 }
11116 }
11117
11118 if (pVCpu->iem.s.cActiveMappings)
11119 iemMemRollback(pVCpu);
11120
11121 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11122}
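
/*
 * A minimal usage sketch showing how an exit handler might forward a
 * 'rep outsb' to the API above; the operand size, segment and instruction
 * length are invented for illustration.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC hmExampleRepOutsbExit(PVMCPUCC pVCpu)
{
    /* 'rep outsb': byte accesses from DS:ESI with 32-bit addressing, a 2 byte
       instruction, and the I/O port permission already checked by the caller. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS, true /*fIoChecked*/);
}
#endif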
11123
11124
11125/**
11126 * Interface for HM and EM for executing string I/O IN (read) instructions.
11127 *
11128 * This API ASSUMES that the caller has already verified that the guest code is
11129 * allowed to access the I/O port. (The I/O port is in the DX register in the
11130 * guest state.)
11131 *
11132 * @returns Strict VBox status code.
11133 * @param pVCpu The cross context virtual CPU structure.
11134 * @param cbValue The size of the I/O port access (1, 2, or 4).
11135 * @param enmAddrMode The addressing mode.
11136 * @param fRepPrefix Indicates whether a repeat prefix is used
11137 * (doesn't matter which for this instruction).
11138 * @param cbInstr The instruction length in bytes.
11139 * @param fIoChecked Whether the access to the I/O port has been
11140 * checked or not. It's typically checked in the
11141 * HM scenario.
11142 */
11143VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11144 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
11145{
11146 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11147
11148 /*
11149 * State init.
11150 */
11151 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11152
11153 /*
11154 * Switch orgy for getting to the right handler.
11155 */
11156 VBOXSTRICTRC rcStrict;
11157 if (fRepPrefix)
11158 {
11159 switch (enmAddrMode)
11160 {
11161 case IEMMODE_16BIT:
11162 switch (cbValue)
11163 {
11164 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
11165 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
11166 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
11167 default:
11168 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11169 }
11170 break;
11171
11172 case IEMMODE_32BIT:
11173 switch (cbValue)
11174 {
11175 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
11176 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
11177 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
11178 default:
11179 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11180 }
11181 break;
11182
11183 case IEMMODE_64BIT:
11184 switch (cbValue)
11185 {
11186 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
11187 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
11188 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
11189 default:
11190 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11191 }
11192 break;
11193
11194 default:
11195 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11196 }
11197 }
11198 else
11199 {
11200 switch (enmAddrMode)
11201 {
11202 case IEMMODE_16BIT:
11203 switch (cbValue)
11204 {
11205 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
11206 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
11207 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
11208 default:
11209 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11210 }
11211 break;
11212
11213 case IEMMODE_32BIT:
11214 switch (cbValue)
11215 {
11216 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
11217 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
11218 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
11219 default:
11220 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11221 }
11222 break;
11223
11224 case IEMMODE_64BIT:
11225 switch (cbValue)
11226 {
11227 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
11228 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
11229 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
11230 default:
11231 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11232 }
11233 break;
11234
11235 default:
11236 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11237 }
11238 }
11239
11240 if ( pVCpu->iem.s.cActiveMappings == 0
11241 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
11242 { /* likely */ }
11243 else
11244 {
11245 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
11246 iemMemRollback(pVCpu);
11247 }
11248 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11249}
11250
11251
11252/**
11253 * Interface for rawmode to execute an OUT instruction.
11254 *
11255 * @returns Strict VBox status code.
11256 * @param pVCpu The cross context virtual CPU structure.
11257 * @param cbInstr The instruction length in bytes.
11258 * @param   u16Port     The port to write to.
11259 * @param fImm Whether the port is specified using an immediate operand or
11260 * using the implicit DX register.
11261 * @param cbReg The register size.
11262 *
11263 * @remarks In ring-0 not all of the state needs to be synced in.
11264 */
11265VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11266{
11267 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11268 Assert(cbReg <= 4 && cbReg != 3);
11269
11270 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11271 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
11272 Assert(!pVCpu->iem.s.cActiveMappings);
11273 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11274}
11275
11276
11277/**
11278 * Interface for rawmode to execute an IN instruction.
11279 *
11280 * @returns Strict VBox status code.
11281 * @param pVCpu The cross context virtual CPU structure.
11282 * @param cbInstr The instruction length in bytes.
11283 * @param u16Port The port to read.
11284 * @param fImm Whether the port is specified using an immediate operand or
11285 * using the implicit DX.
11286 * @param cbReg The register size.
11287 */
11288VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11289{
11290 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11291 Assert(cbReg <= 4 && cbReg != 3);
11292
11293 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11294 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
11295 Assert(!pVCpu->iem.s.cActiveMappings);
11296 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11297}
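
/*
 * A minimal usage sketch for the two port I/O APIs above; the ports, operand
 * sizes and instruction lengths are invented for illustration.  The port is
 * always supplied by the caller, fImm merely records how the guest
 * instruction encoded it.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC emR3ExamplePortIo(PVMCPUCC pVCpu)
{
    /* 'out dx, al' - one byte instruction, the port taken from DX by the caller. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, (uint16_t)pVCpu->cpum.GstCtx.rdx,
                                              false /*fImm*/, 1 /*cbReg*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* 'in al, 60h' - two byte instruction with an immediate port operand. */
    return IEMExecDecodedIn(pVCpu, 2 /*cbInstr*/, 0x60 /*u16Port*/, true /*fImm*/, 1 /*cbReg*/);
}
#endif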
11298
11299
11300/**
11301 * Interface for HM and EM to write to a CRx register.
11302 *
11303 * @returns Strict VBox status code.
11304 * @param pVCpu The cross context virtual CPU structure.
11305 * @param cbInstr The instruction length in bytes.
11306 * @param iCrReg The control register number (destination).
11307 * @param iGReg The general purpose register number (source).
11308 *
11309 * @remarks In ring-0 not all of the state needs to be synced in.
11310 */
11311VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11312{
11313 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11314 Assert(iCrReg < 16);
11315 Assert(iGReg < 16);
11316
11317 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11318 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11319 Assert(!pVCpu->iem.s.cActiveMappings);
11320 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11321}
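
/*
 * A minimal usage sketch for the CRx write API above, e.g. forwarding an
 * intercepted 'mov cr3, rax' (a 3 byte instruction); note the argument order:
 * destination control register first, source GPR second.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC hmExampleMovToCr3(PVMCPUCC pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg: CR3*/, X86_GREG_xAX /*iGReg*/);
}
#endif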
11322
11323
11324/**
11325 * Interface for HM and EM to read from a CRx register.
11326 *
11327 * @returns Strict VBox status code.
11328 * @param pVCpu The cross context virtual CPU structure.
11329 * @param cbInstr The instruction length in bytes.
11330 * @param iGReg The general purpose register number (destination).
11331 * @param iCrReg The control register number (source).
11332 *
11333 * @remarks In ring-0 not all of the state needs to be synced in.
11334 */
11335VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11336{
11337 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11338 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11339 | CPUMCTX_EXTRN_APIC_TPR);
11340 Assert(iCrReg < 16);
11341 Assert(iGReg < 16);
11342
11343 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11344 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11345 Assert(!pVCpu->iem.s.cActiveMappings);
11346 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11347}
11348
11349
11350/**
11351 * Interface for HM and EM to write to a DRx register.
11352 *
11353 * @returns Strict VBox status code.
11354 * @param pVCpu The cross context virtual CPU structure.
11355 * @param cbInstr The instruction length in bytes.
11356 * @param iDrReg The debug register number (destination).
11357 * @param iGReg The general purpose register number (source).
11358 *
11359 * @remarks In ring-0 not all of the state needs to be synced in.
11360 */
11361VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11362{
11363 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11364 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11365 Assert(iDrReg < 8);
11366 Assert(iGReg < 16);
11367
11368 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11369 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11370 Assert(!pVCpu->iem.s.cActiveMappings);
11371 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11372}
11373
11374
11375/**
11376 * Interface for HM and EM to read from a DRx register.
11377 *
11378 * @returns Strict VBox status code.
11379 * @param pVCpu The cross context virtual CPU structure.
11380 * @param cbInstr The instruction length in bytes.
11381 * @param iGReg The general purpose register number (destination).
11382 * @param iDrReg The debug register number (source).
11383 *
11384 * @remarks In ring-0 not all of the state needs to be synced in.
11385 */
11386VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11387{
11388 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11389 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11390 Assert(iDrReg < 8);
11391 Assert(iGReg < 16);
11392
11393 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11394 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11395 Assert(!pVCpu->iem.s.cActiveMappings);
11396 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11397}
11398
11399
11400/**
11401 * Interface for HM and EM to clear the CR0[TS] bit.
11402 *
11403 * @returns Strict VBox status code.
11404 * @param pVCpu The cross context virtual CPU structure.
11405 * @param cbInstr The instruction length in bytes.
11406 *
11407 * @remarks In ring-0 not all of the state needs to be synced in.
11408 */
11409VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11410{
11411 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11412
11413 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11414 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11415 Assert(!pVCpu->iem.s.cActiveMappings);
11416 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11417}
11418
11419
11420/**
11421 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11422 *
11423 * @returns Strict VBox status code.
11424 * @param pVCpu The cross context virtual CPU structure.
11425 * @param cbInstr The instruction length in bytes.
11426 * @param uValue The value to load into CR0.
11427 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11428 * memory operand. Otherwise pass NIL_RTGCPTR.
11429 *
11430 * @remarks In ring-0 not all of the state needs to be synced in.
11431 */
11432VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11433{
11434 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11435
11436 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11437 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11438 Assert(!pVCpu->iem.s.cActiveMappings);
11439 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11440}
11441
11442
11443/**
11444 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11445 *
11446 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11447 *
11448 * @returns Strict VBox status code.
11449 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11450 * @param cbInstr The instruction length in bytes.
11451 * @remarks In ring-0 not all of the state needs to be synced in.
11452 * @thread EMT(pVCpu)
11453 */
11454VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11455{
11456 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11457
11458 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11459 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11460 Assert(!pVCpu->iem.s.cActiveMappings);
11461 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11462}
11463
11464
11465/**
11466 * Interface for HM and EM to emulate the WBINVD instruction.
11467 *
11468 * @returns Strict VBox status code.
11469 * @param pVCpu The cross context virtual CPU structure.
11470 * @param cbInstr The instruction length in bytes.
11471 *
11472 * @remarks In ring-0 not all of the state needs to be synced in.
11473 */
11474VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11475{
11476 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11477
11478 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11479 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11480 Assert(!pVCpu->iem.s.cActiveMappings);
11481 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11482}
11483
11484
11485/**
11486 * Interface for HM and EM to emulate the INVD instruction.
11487 *
11488 * @returns Strict VBox status code.
11489 * @param pVCpu The cross context virtual CPU structure.
11490 * @param cbInstr The instruction length in bytes.
11491 *
11492 * @remarks In ring-0 not all of the state needs to be synced in.
11493 */
11494VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11495{
11496 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11497
11498 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11499 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11500 Assert(!pVCpu->iem.s.cActiveMappings);
11501 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11502}
11503
11504
11505/**
11506 * Interface for HM and EM to emulate the INVLPG instruction.
11507 *
11508 * @returns Strict VBox status code.
11509 * @retval VINF_PGM_SYNC_CR3
11510 *
11511 * @param pVCpu The cross context virtual CPU structure.
11512 * @param cbInstr The instruction length in bytes.
11513 * @param GCPtrPage The effective address of the page to invalidate.
11514 *
11515 * @remarks In ring-0 not all of the state needs to be synced in.
11516 */
11517VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11518{
11519 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11520
11521 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11522 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11523 Assert(!pVCpu->iem.s.cActiveMappings);
11524 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11525}
11526
11527
11528/**
11529 * Interface for HM and EM to emulate the INVPCID instruction.
11530 *
11531 * @returns Strict VBox status code.
11532 * @retval VINF_PGM_SYNC_CR3
11533 *
11534 * @param pVCpu The cross context virtual CPU structure.
11535 * @param cbInstr The instruction length in bytes.
11536 * @param iEffSeg The effective segment register.
11537 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11538 * @param uType The invalidation type.
11539 *
11540 * @remarks In ring-0 not all of the state needs to be synced in.
11541 */
11542VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11543 uint64_t uType)
11544{
11545 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11546
11547 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11548 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11549 Assert(!pVCpu->iem.s.cActiveMappings);
11550 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11551}
11552
11553
11554/**
11555 * Interface for HM and EM to emulate the CPUID instruction.
11556 *
11557 * @returns Strict VBox status code.
11558 *
11559 * @param pVCpu The cross context virtual CPU structure.
11560 * @param cbInstr The instruction length in bytes.
11561 *
11562 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
11563 */
11564VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11565{
11566 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11567 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11568
11569 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11570 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11571 Assert(!pVCpu->iem.s.cActiveMappings);
11572 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11573}
11574
11575
11576/**
11577 * Interface for HM and EM to emulate the RDPMC instruction.
11578 *
11579 * @returns Strict VBox status code.
11580 *
11581 * @param pVCpu The cross context virtual CPU structure.
11582 * @param cbInstr The instruction length in bytes.
11583 *
11584 * @remarks Not all of the state needs to be synced in.
11585 */
11586VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11587{
11588 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11589 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11590
11591 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11592 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11593 Assert(!pVCpu->iem.s.cActiveMappings);
11594 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11595}
11596
11597
11598/**
11599 * Interface for HM and EM to emulate the RDTSC instruction.
11600 *
11601 * @returns Strict VBox status code.
11602 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11603 *
11604 * @param pVCpu The cross context virtual CPU structure.
11605 * @param cbInstr The instruction length in bytes.
11606 *
11607 * @remarks Not all of the state needs to be synced in.
11608 */
11609VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11610{
11611 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11612 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11613
11614 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11615 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11616 Assert(!pVCpu->iem.s.cActiveMappings);
11617 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11618}
11619
11620
11621/**
11622 * Interface for HM and EM to emulate the RDTSCP instruction.
11623 *
11624 * @returns Strict VBox status code.
11625 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11626 *
11627 * @param pVCpu The cross context virtual CPU structure.
11628 * @param cbInstr The instruction length in bytes.
11629 *
11630 * @remarks Not all of the state needs to be synced in. Recommended
11631 *          to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
11632 */
11633VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11634{
11635 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11636 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11637
11638 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11639 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11640 Assert(!pVCpu->iem.s.cActiveMappings);
11641 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11642}
11643
11644
11645/**
11646 * Interface for HM and EM to emulate the RDMSR instruction.
11647 *
11648 * @returns Strict VBox status code.
11649 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11650 *
11651 * @param pVCpu The cross context virtual CPU structure.
11652 * @param cbInstr The instruction length in bytes.
11653 *
11654 * @remarks Not all of the state needs to be synced in. Requires RCX and
11655 * (currently) all MSRs.
11656 */
11657VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11658{
11659 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11660 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11661
11662 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11663 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11664 Assert(!pVCpu->iem.s.cActiveMappings);
11665 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11666}
11667
11668
11669/**
11670 * Interface for HM and EM to emulate the WRMSR instruction.
11671 *
11672 * @returns Strict VBox status code.
11673 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11674 *
11675 * @param pVCpu The cross context virtual CPU structure.
11676 * @param cbInstr The instruction length in bytes.
11677 *
11678 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11679 * and (currently) all MSRs.
11680 */
11681VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11682{
11683 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11684 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11685 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11686
11687 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11688 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11689 Assert(!pVCpu->iem.s.cActiveMappings);
11690 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11691}
11692
11693
11694/**
11695 * Interface for HM and EM to emulate the MONITOR instruction.
11696 *
11697 * @returns Strict VBox status code.
11698 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11699 *
11700 * @param pVCpu The cross context virtual CPU structure.
11701 * @param cbInstr The instruction length in bytes.
11702 *
11703 * @remarks Not all of the state needs to be synced in.
11704 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11705 * are used.
11706 */
11707VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11708{
11709 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11710 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11711
11712 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11713 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11714 Assert(!pVCpu->iem.s.cActiveMappings);
11715 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11716}
11717
11718
11719/**
11720 * Interface for HM and EM to emulate the MWAIT instruction.
11721 *
11722 * @returns Strict VBox status code.
11723 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11724 *
11725 * @param pVCpu The cross context virtual CPU structure.
11726 * @param cbInstr The instruction length in bytes.
11727 *
11728 * @remarks Not all of the state needs to be synced in.
11729 */
11730VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11731{
11732 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11733 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11734
11735 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11736 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11737 Assert(!pVCpu->iem.s.cActiveMappings);
11738 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11739}
11740
11741
11742/**
11743 * Interface for HM and EM to emulate the HLT instruction.
11744 *
11745 * @returns Strict VBox status code.
11746 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11747 *
11748 * @param pVCpu The cross context virtual CPU structure.
11749 * @param cbInstr The instruction length in bytes.
11750 *
11751 * @remarks Not all of the state needs to be synced in.
11752 */
11753VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11754{
11755 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11756
11757 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11758 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11759 Assert(!pVCpu->iem.s.cActiveMappings);
11760 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11761}
11762
11763
11764/**
11765 * Checks if IEM is in the process of delivering an event (interrupt or
11766 * exception).
11767 *
11768 * @returns true if we're in the process of raising an interrupt or exception,
11769 * false otherwise.
11770 * @param pVCpu The cross context virtual CPU structure.
11771 * @param puVector Where to store the vector associated with the
11772 * currently delivered event, optional.
11773 * @param   pfFlags         Where to store the event delivery flags (see
11774 * IEM_XCPT_FLAGS_XXX), optional.
11775 * @param puErr Where to store the error code associated with the
11776 * event, optional.
11777 * @param puCr2 Where to store the CR2 associated with the event,
11778 * optional.
11779 * @remarks The caller should check the flags to determine if the error code and
11780 * CR2 are valid for the event.
11781 */
11782VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11783{
11784 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11785 if (fRaisingXcpt)
11786 {
11787 if (puVector)
11788 *puVector = pVCpu->iem.s.uCurXcpt;
11789 if (pfFlags)
11790 *pfFlags = pVCpu->iem.s.fCurXcpt;
11791 if (puErr)
11792 *puErr = pVCpu->iem.s.uCurXcptErr;
11793 if (puCr2)
11794 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11795 }
11796 return fRaisingXcpt;
11797}
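
/*
 * A minimal usage sketch for IEMGetCurrentXcpt; as noted above, the error
 * code and CR2 outputs are only meaningful when the corresponding flags are
 * set.  The helper name is invented for illustration.
 */
#if 0 /* illustrative sketch, not built */
static void hmExampleLogPendingXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x (flags %#x); error code %s, CR2 %s\n", uVector, fFlags,
             fFlags & IEM_XCPT_FLAGS_ERR ? "valid" : "not valid",
             fFlags & IEM_XCPT_FLAGS_CR2 ? "valid" : "not valid"));
    RT_NOREF(uErr, uCr2);
}
#endif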
11798
11799#ifdef IN_RING3
11800
11801/**
11802 * Handles the unlikely and probably fatal merge cases.
11803 *
11804 * @returns Merged status code.
11805 * @param rcStrict Current EM status code.
11806 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11807 * with @a rcStrict.
11808 * @param iMemMap The memory mapping index. For error reporting only.
11809 * @param pVCpu The cross context virtual CPU structure of the calling
11810 * thread, for error reporting only.
11811 */
11812DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11813 unsigned iMemMap, PVMCPUCC pVCpu)
11814{
11815 if (RT_FAILURE_NP(rcStrict))
11816 return rcStrict;
11817
11818 if (RT_FAILURE_NP(rcStrictCommit))
11819 return rcStrictCommit;
11820
11821 if (rcStrict == rcStrictCommit)
11822 return rcStrictCommit;
11823
11824 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11825 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11826 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11827 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11828 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11829 return VERR_IOM_FF_STATUS_IPE;
11830}
11831
11832
11833/**
11834 * Helper for IOMR3ProcessForceFlag.
11835 *
11836 * @returns Merged status code.
11837 * @param rcStrict Current EM status code.
11838 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11839 * with @a rcStrict.
11840 * @param iMemMap The memory mapping index. For error reporting only.
11841 * @param pVCpu The cross context virtual CPU structure of the calling
11842 * thread, for error reporting only.
11843 */
11844DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11845{
11846 /* Simple. */
11847 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11848 return rcStrictCommit;
11849
11850 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11851 return rcStrict;
11852
11853 /* EM scheduling status codes. */
11854 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11855 && rcStrict <= VINF_EM_LAST))
11856 {
11857 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11858 && rcStrictCommit <= VINF_EM_LAST))
11859 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11860 }
11861
11862 /* Unlikely */
11863 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11864}
11865
11866
11867/**
11868 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11869 *
11870 * @returns Merge between @a rcStrict and what the commit operation returned.
11871 * @param pVM The cross context VM structure.
11872 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11873 * @param rcStrict The status code returned by ring-0 or raw-mode.
11874 */
11875VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11876{
11877 /*
11878 * Reset the pending commit.
11879 */
11880 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11881 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11882 ("%#x %#x %#x\n",
11883 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11884 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11885
11886 /*
11887 * Commit the pending bounce buffers (usually just one).
11888 */
11889 unsigned cBufs = 0;
11890 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11891 while (iMemMap-- > 0)
11892 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11893 {
11894 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11895 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11896 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11897
11898 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11899 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11900 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11901
11902 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11903 {
11904 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11905 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11906 pbBuf,
11907 cbFirst,
11908 PGMACCESSORIGIN_IEM);
11909 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11910 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11911 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11912 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11913 }
11914
11915 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11916 {
11917 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11918 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11919 pbBuf + cbFirst,
11920 cbSecond,
11921 PGMACCESSORIGIN_IEM);
11922 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11923 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11924 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11925 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11926 }
11927 cBufs++;
11928 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11929 }
11930
11931 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11932 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11933 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11934 pVCpu->iem.s.cActiveMappings = 0;
11935 return rcStrict;
11936}
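
/*
 * A minimal sketch of the expected caller side: when VMCPU_FF_IEM is set
 * after returning to ring-3, the force-flag handling code feeds its current
 * status code through IEMR3ProcessForceFlag.  The wrapper name is invented
 * for illustration.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC emR3ExampleHandleIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif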
11937
11938#endif /* IN_RING3 */
11939