VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 97407

Last change on this file since 97407 was 97406, checked in by vboxsync, 2 years ago

VMM/IEM,CPUM: Partial single stepping support in the interpreter. bugref:9898

1/* $Id: IEMAll.cpp 97406 2022-11-05 12:42:14Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes (an illustrative sketch follows this comment block):
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 */
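
/*
 * Illustrative sketch only (kept out of the build with #if 0): how the log
 * levels listed above typically show up in IEM code.  The Log/LogFlow/Log4/Log8
 * macros and the LOG_GROUP_IEM group are the real logging facilities used
 * throughout this file; the helper function, its parameters and the messages
 * are made-up examples.
 */
#if 0
static void iemLogUsageSketch(PVMCPUCC pVCpu, uint8_t bOpcode, uint64_t uValue, RTGCPTR GCPtrMem)
{
    LogFlow(("iemLogUsageSketch: enter\n"));                                /* flow: basic enter/exit state info */
    Log4(("decode %04x:%RX64: opcode %#04x\n",
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, bOpcode));     /* level 4: decoding mnemonics w/ EIP */
    Log8(("memwrite %RGv <- %#RX64\n", GCPtrMem, uValue));                  /* level 8: memory writes */
    Log(("iemLogUsageSketch: raising #GP(0)\n"));                           /* level 1: errors and major events */
}
#endif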
91
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <VBox/disopcode.h>
130#include <iprt/asm-math.h>
131#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
132# include <iprt/asm-amd64-x86.h>
133#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
134# include <iprt/asm-arm.h>
135#endif
136#include <iprt/assert.h>
137#include <iprt/string.h>
138#include <iprt/x86.h>
139
140#include "IEMInline.h"
141
142
143/*********************************************************************************************************************************
144* Structures and Typedefs *
145*********************************************************************************************************************************/
146/**
147 * CPU exception classes.
148 */
149typedef enum IEMXCPTCLASS
150{
151 IEMXCPTCLASS_BENIGN,
152 IEMXCPTCLASS_CONTRIBUTORY,
153 IEMXCPTCLASS_PAGE_FAULT,
154 IEMXCPTCLASS_DOUBLE_FAULT
155} IEMXCPTCLASS;
156
157
158/*********************************************************************************************************************************
159* Global Variables *
160*********************************************************************************************************************************/
161#if defined(IEM_LOG_MEMORY_WRITES)
162/** What IEM just wrote. */
163uint8_t g_abIemWrote[256];
164/** How much IEM just wrote. */
165size_t g_cbIemWrote;
166#endif
167
168
169/*********************************************************************************************************************************
170* Internal Functions *
171*********************************************************************************************************************************/
172static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
173 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
174
175
176/**
177 * Initializes the decoder state.
178 *
179 * iemReInitDecoder is mostly a copy of this function.
180 *
181 * @param pVCpu The cross context virtual CPU structure of the
182 * calling thread.
183 * @param fBypassHandlers Whether to bypass access handlers.
184 * @param fDisregardLock Whether to disregard the LOCK prefix.
185 */
186DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
187{
188 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
189 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
191 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
192 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
193 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
194 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
196 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
197 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
198
199 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
200 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
201 pVCpu->iem.s.enmCpuMode = enmMode;
202 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
203 pVCpu->iem.s.enmEffAddrMode = enmMode;
204 if (enmMode != IEMMODE_64BIT)
205 {
206 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
207 pVCpu->iem.s.enmEffOpSize = enmMode;
208 }
209 else
210 {
211 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
212 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
213 }
214 pVCpu->iem.s.fPrefixes = 0;
215 pVCpu->iem.s.uRexReg = 0;
216 pVCpu->iem.s.uRexB = 0;
217 pVCpu->iem.s.uRexIndex = 0;
218 pVCpu->iem.s.idxPrefix = 0;
219 pVCpu->iem.s.uVex3rdReg = 0;
220 pVCpu->iem.s.uVexLength = 0;
221 pVCpu->iem.s.fEvexStuff = 0;
222 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
223#ifdef IEM_WITH_CODE_TLB
224 pVCpu->iem.s.pbInstrBuf = NULL;
225 pVCpu->iem.s.offInstrNextByte = 0;
226 pVCpu->iem.s.offCurInstrStart = 0;
227# ifdef VBOX_STRICT
228 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
229 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
230 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
231# endif
232#else
233 pVCpu->iem.s.offOpcode = 0;
234 pVCpu->iem.s.cbOpcode = 0;
235#endif
236 pVCpu->iem.s.offModRm = 0;
237 pVCpu->iem.s.cActiveMappings = 0;
238 pVCpu->iem.s.iNextMapping = 0;
239 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
240 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
241 pVCpu->iem.s.fDisregardLock = fDisregardLock;
242
243#ifdef DBGFTRACE_ENABLED
244 switch (enmMode)
245 {
246 case IEMMODE_64BIT:
247 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
248 break;
249 case IEMMODE_32BIT:
250 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
251 break;
252 case IEMMODE_16BIT:
253 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
254 break;
255 }
256#endif
257}
258
259
260/**
261 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
262 *
263 * This is mostly a copy of iemInitDecoder.
264 *
265 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
266 */
267DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
268{
269 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
270 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
271 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
278
279 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
280 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
281 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
282 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
283 pVCpu->iem.s.enmEffAddrMode = enmMode;
284 if (enmMode != IEMMODE_64BIT)
285 {
286 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffOpSize = enmMode;
288 }
289 else
290 {
291 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
292 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
293 }
294 pVCpu->iem.s.fPrefixes = 0;
295 pVCpu->iem.s.uRexReg = 0;
296 pVCpu->iem.s.uRexB = 0;
297 pVCpu->iem.s.uRexIndex = 0;
298 pVCpu->iem.s.idxPrefix = 0;
299 pVCpu->iem.s.uVex3rdReg = 0;
300 pVCpu->iem.s.uVexLength = 0;
301 pVCpu->iem.s.fEvexStuff = 0;
302 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
303#ifdef IEM_WITH_CODE_TLB
304 if (pVCpu->iem.s.pbInstrBuf)
305 {
306 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
307 - pVCpu->iem.s.uInstrBufPc;
308 if (off < pVCpu->iem.s.cbInstrBufTotal)
309 {
310 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
311 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
312 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
313 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
314 else
315 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
316 }
317 else
318 {
319 pVCpu->iem.s.pbInstrBuf = NULL;
320 pVCpu->iem.s.offInstrNextByte = 0;
321 pVCpu->iem.s.offCurInstrStart = 0;
322 pVCpu->iem.s.cbInstrBuf = 0;
323 pVCpu->iem.s.cbInstrBufTotal = 0;
324 }
325 }
326 else
327 {
328 pVCpu->iem.s.offInstrNextByte = 0;
329 pVCpu->iem.s.offCurInstrStart = 0;
330 pVCpu->iem.s.cbInstrBuf = 0;
331 pVCpu->iem.s.cbInstrBufTotal = 0;
332 }
333#else
334 pVCpu->iem.s.cbOpcode = 0;
335 pVCpu->iem.s.offOpcode = 0;
336#endif
337 pVCpu->iem.s.offModRm = 0;
338 Assert(pVCpu->iem.s.cActiveMappings == 0);
339 pVCpu->iem.s.iNextMapping = 0;
340 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
341 Assert(pVCpu->iem.s.fBypassHandlers == false);
342
343#ifdef DBGFTRACE_ENABLED
344 switch (enmMode)
345 {
346 case IEMMODE_64BIT:
347 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
348 break;
349 case IEMMODE_32BIT:
350 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
351 break;
352 case IEMMODE_16BIT:
353 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
354 break;
355 }
356#endif
357}
358
359
360
361/**
362 * Prefetches opcodes the first time when starting execution.
363 *
364 * @returns Strict VBox status code.
365 * @param pVCpu The cross context virtual CPU structure of the
366 * calling thread.
367 * @param fBypassHandlers Whether to bypass access handlers.
368 * @param fDisregardLock Whether to disregard LOCK prefixes.
369 *
370 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
371 * store them as such.
372 */
373static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
374{
375 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
376
377#ifdef IEM_WITH_CODE_TLB
378 /** @todo Do ITLB lookup here. */
379
380#else /* !IEM_WITH_CODE_TLB */
381
382 /*
383 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
384 *
385 * First translate CS:rIP to a physical address.
386 */
387 uint32_t cbToTryRead;
388 RTGCPTR GCPtrPC;
389 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
390 {
391 cbToTryRead = GUEST_PAGE_SIZE;
392 GCPtrPC = pVCpu->cpum.GstCtx.rip;
393 if (IEM_IS_CANONICAL(GCPtrPC))
394 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
395 else
396 return iemRaiseGeneralProtectionFault0(pVCpu);
397 }
398 else
399 {
400 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
401 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
402 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
403 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
404 else
405 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
406 if (cbToTryRead) { /* likely */ }
407 else /* overflowed */
408 {
409 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
410 cbToTryRead = UINT32_MAX;
411 }
412 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
413 Assert(GCPtrPC <= UINT32_MAX);
414 }
415
416 PGMPTWALK Walk;
417 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
418 if (RT_SUCCESS(rc))
419 Assert(Walk.fSucceeded); /* probable. */
420 else
421 {
422 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
423#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
424 if (Walk.fFailed & PGM_WALKFAIL_EPT)
425 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
426#endif
427 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
428 }
429 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
430 else
431 {
432 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
433#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
434 if (Walk.fFailed & PGM_WALKFAIL_EPT)
435 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
436#endif
437 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
438 }
439 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
440 else
441 {
442 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
443#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
444 if (Walk.fFailed & PGM_WALKFAIL_EPT)
445 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
446#endif
447 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
448 }
449 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
450 /** @todo Check reserved bits and such stuff. PGM is better at doing
451 * that, so do it when implementing the guest virtual address
452 * TLB... */
453
454 /*
455 * Read the bytes at this address.
456 */
457 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
458 if (cbToTryRead > cbLeftOnPage)
459 cbToTryRead = cbLeftOnPage;
460 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
461 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
462
463 if (!pVCpu->iem.s.fBypassHandlers)
464 {
465 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
466 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
467 { /* likely */ }
468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
469 {
470 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
471 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
472 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
473 }
474 else
475 {
476 Log((RT_SUCCESS(rcStrict)
477 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
478 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
479 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
480 return rcStrict;
481 }
482 }
483 else
484 {
485 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
486 if (RT_SUCCESS(rc))
487 { /* likely */ }
488 else
489 {
490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
491 GCPtrPC, GCPhys, cbToTryRead, rc));
492 return rc;
493 }
494 }
495 pVCpu->iem.s.cbOpcode = cbToTryRead;
496#endif /* !IEM_WITH_CODE_TLB */
497 return VINF_SUCCESS;
498}
499
500
501/**
502 * Invalidates the IEM TLBs.
503 *
504 * This is called internally as well as by PGM when moving GC mappings.
505 *
506 *
507 * @param pVCpu The cross context virtual CPU structure of the calling
508 * thread.
509 */
510VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
511{
512#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
513 Log10(("IEMTlbInvalidateAll\n"));
514# ifdef IEM_WITH_CODE_TLB
515 pVCpu->iem.s.cbInstrBufTotal = 0;
516 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
517 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
518 { /* very likely */ }
519 else
520 {
521 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
522 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
523 while (i-- > 0)
524 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
525 }
526# endif
527
528# ifdef IEM_WITH_DATA_TLB
529 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
530 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
531 { /* very likely */ }
532 else
533 {
534 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
535 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
536 while (i-- > 0)
537 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
538 }
539# endif
540#else
541 RT_NOREF(pVCpu);
542#endif
543}
544
545
546/**
547 * Invalidates a page in the TLBs.
548 *
549 * @param pVCpu The cross context virtual CPU structure of the calling
550 * thread.
551 * @param GCPtr The address of the page to invalidate.
552 * @thread EMT(pVCpu)
553 */
554VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
555{
556#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
557 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
558 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
559 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
560 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
561
562# ifdef IEM_WITH_CODE_TLB
563 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
564 {
565 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
566 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
567 pVCpu->iem.s.cbInstrBufTotal = 0;
568 }
569# endif
570
571# ifdef IEM_WITH_DATA_TLB
572 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
573 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
574# endif
575#else
576 NOREF(pVCpu); NOREF(GCPtr);
577#endif
578}
579
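/*
 * Illustrative sketch only (kept out of the build with #if 0): how a caller
 * such as PGM might use the invalidation APIs above after changing a guest
 * mapping.  IEMTlbInvalidatePage and IEMTlbInvalidateAll are the entry points
 * defined above; the helper function and its parameters are made up.
 */
#if 0
static void pgmMappingChangedSketch(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, bool fFlushEverything)
{
    if (!fFlushEverything)
        IEMTlbInvalidatePage(pVCpu, GCPtrPage); /* drops the matching code & data TLB entries */
    else
        IEMTlbInvalidateAll(pVCpu);             /* bumps the TLB revisions, invalidating all entries */
}
#endif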
580
581#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
582/**
583 * Invalidates both TLBs in slow fashion following a rollover.
584 *
585 * Worker for IEMTlbInvalidateAllPhysical,
586 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
587 * iemMemMapJmp and others.
588 *
589 * @thread EMT(pVCpu)
590 */
591static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
592{
593 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
594 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
595 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
596
597 unsigned i;
598# ifdef IEM_WITH_CODE_TLB
599 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
600 while (i-- > 0)
601 {
602 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
603 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
604 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
605 }
606# endif
607# ifdef IEM_WITH_DATA_TLB
608 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
609 while (i-- > 0)
610 {
611 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
612 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
613 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
614 }
615# endif
616
617}
618#endif
619
620
621/**
622 * Invalidates the host physical aspects of the IEM TLBs.
623 *
624 * This is called internally as well as by PGM when moving GC mappings.
625 *
626 * @param pVCpu The cross context virtual CPU structure of the calling
627 * thread.
628 * @note Currently not used.
629 */
630VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
631{
632#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
633 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
634 Log10(("IEMTlbInvalidateAllPhysical\n"));
635
636# ifdef IEM_WITH_CODE_TLB
637 pVCpu->iem.s.cbInstrBufTotal = 0;
638# endif
639 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
640 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
641 {
642 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
643 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
644 }
645 else
646 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
647#else
648 NOREF(pVCpu);
649#endif
650}
651
652
653/**
654 * Invalidates the host physical aspects of the IEM TLBs.
655 *
656 * This is called internally as well as by PGM when moving GC mappings.
657 *
658 * @param pVM The cross context VM structure.
659 * @param idCpuCaller The ID of the calling EMT if available to the caller,
660 * otherwise NIL_VMCPUID.
661 *
662 * @remarks Caller holds the PGM lock.
663 */
664VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
665{
666#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
667 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
668 if (pVCpuCaller)
669 VMCPU_ASSERT_EMT(pVCpuCaller);
670 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
671
672 VMCC_FOR_EACH_VMCPU(pVM)
673 {
674# ifdef IEM_WITH_CODE_TLB
675 if (pVCpuCaller == pVCpu)
676 pVCpu->iem.s.cbInstrBufTotal = 0;
677# endif
678
679 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
680 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
681 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
682 { /* likely */}
683 else if (pVCpuCaller == pVCpu)
684 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
685 else
686 {
687 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
688 continue;
689 }
690 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
691 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
692 }
693 VMCC_FOR_EACH_VMCPU_END(pVM);
694
695#else
696 RT_NOREF(pVM, idCpuCaller);
697#endif
698}
699
700#ifdef IEM_WITH_CODE_TLB
701
702/**
703 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
704 * failure and jumps.
705 *
706 * We end up here for a number of reasons:
707 * - pbInstrBuf isn't yet initialized.
708 * - Advancing beyond the buffer boundary (e.g. cross page).
709 * - Advancing beyond the CS segment limit.
710 * - Fetching from non-mappable page (e.g. MMIO).
711 *
712 * @param pVCpu The cross context virtual CPU structure of the
713 * calling thread.
714 * @param pvDst Where to return the bytes.
715 * @param cbDst Number of bytes to read.
716 *
717 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
718 */
719void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) RT_NOEXCEPT
720{
721#ifdef IN_RING3
722 for (;;)
723 {
724 Assert(cbDst <= 8);
725 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
726
727 /*
728 * We might have a partial buffer match, deal with that first to make the
729 * rest simpler. This is the first part of the cross page/buffer case.
730 */
731 if (pVCpu->iem.s.pbInstrBuf != NULL)
732 {
733 if (offBuf < pVCpu->iem.s.cbInstrBuf)
734 {
735 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
736 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
737 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
738
739 cbDst -= cbCopy;
740 pvDst = (uint8_t *)pvDst + cbCopy;
741 offBuf += cbCopy;
742 pVCpu->iem.s.offInstrNextByte += offBuf;
743 }
744 }
745
746 /*
747 * Check segment limit, figuring how much we're allowed to access at this point.
748 *
749 * We will fault immediately if RIP is past the segment limit / in non-canonical
750 * territory. If we do continue, there are one or more bytes to read before we
751 * end up in trouble and we need to do that first before faulting.
752 */
753 RTGCPTR GCPtrFirst;
754 uint32_t cbMaxRead;
755 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
756 {
757 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
758 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
759 { /* likely */ }
760 else
761 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
762 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
763 }
764 else
765 {
766 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
767 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); - this is allowed */
768 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
769 { /* likely */ }
770 else /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
771 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
772 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
773 if (cbMaxRead != 0)
774 { /* likely */ }
775 else
776 {
777 /* Overflowed because address is 0 and limit is max. */
778 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
779 cbMaxRead = X86_PAGE_SIZE;
780 }
781 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
782 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
783 if (cbMaxRead2 < cbMaxRead)
784 cbMaxRead = cbMaxRead2;
785 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
786 }
787
788 /*
789 * Get the TLB entry for this piece of code.
790 */
791 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
792 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
793 if (pTlbe->uTag == uTag)
794 {
795 /* likely when executing lots of code, otherwise unlikely */
796# ifdef VBOX_WITH_STATISTICS
797 pVCpu->iem.s.CodeTlb.cTlbHits++;
798# endif
799 }
800 else
801 {
802 pVCpu->iem.s.CodeTlb.cTlbMisses++;
803 PGMPTWALK Walk;
804 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
805 if (RT_FAILURE(rc))
806 {
807#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
808 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
809 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
810#endif
811 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
812 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
813 }
814
815 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
816 Assert(Walk.fSucceeded);
817 pTlbe->uTag = uTag;
818 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
819 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
820 pTlbe->GCPhys = Walk.GCPhys;
821 pTlbe->pbMappingR3 = NULL;
822 }
823
824 /*
825 * Check TLB page table level access flags.
826 */
827 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
828 {
829 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
830 {
831 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
832 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
833 }
834 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
835 {
836 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
837 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
838 }
839 }
840
841 /*
842 * Look up the physical page info if necessary.
843 */
844 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
845 { /* not necessary */ }
846 else
847 {
848 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
849 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
850 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
851 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
852 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
853 { /* likely */ }
854 else
855 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
856 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
857 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
858 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
859 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
860 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
861 }
862
863# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
864 /*
865 * Try to do a direct read using the pbMappingR3 pointer.
866 */
867 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
868 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
869 {
870 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
871 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
872 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
873 {
874 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
875 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
876 }
877 else
878 {
879 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
880 Assert(cbInstr < cbMaxRead);
881 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
882 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
883 }
884 if (cbDst <= cbMaxRead)
885 {
886 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
887 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
888 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
889 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
890 return;
891 }
892 pVCpu->iem.s.pbInstrBuf = NULL;
893
894 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
895 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
896 }
897 else
898# endif
899#if 0
900 /*
901 * If there is no special read handling, we can read a bit more and
902 * put it in the prefetch buffer.
903 */
904 if ( cbDst < cbMaxRead
905 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
906 {
907 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
908 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
909 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
910 { /* likely */ }
911 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
912 {
913 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
914 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
915 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
916 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
917 }
918 else
919 {
920 Log((RT_SUCCESS(rcStrict)
921 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
922 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
923 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
924 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
925 }
926 }
927 /*
928 * Special read handling, so only read exactly what's needed.
929 * This is a highly unlikely scenario.
930 */
931 else
932#endif
933 {
934 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
935 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
936 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
937 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
938 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
939 { /* likely */ }
940 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
941 {
942 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
943 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
944 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
945 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
946 }
947 else
948 {
949 Log((RT_SUCCESS(rcStrict)
950 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
951 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
952 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
953 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
954 }
955 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
956 if (cbToRead == cbDst)
957 return;
958 }
959
960 /*
961 * More to read, loop.
962 */
963 cbDst -= cbMaxRead;
964 pvDst = (uint8_t *)pvDst + cbMaxRead;
965 }
966#else
967 RT_NOREF(pvDst, cbDst);
968 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
969#endif
970}
971
972#else
973
974/**
975 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
976 * exception if it fails.
977 *
978 * @returns Strict VBox status code.
979 * @param pVCpu The cross context virtual CPU structure of the
980 * calling thread.
981 * @param cbMin The minimum number of bytes relative to offOpcode
982 * that must be read.
983 */
984VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
985{
986 /*
987 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
988 *
989 * First translate CS:rIP to a physical address.
990 */
991 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
992 uint32_t cbToTryRead;
993 RTGCPTR GCPtrNext;
994 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
995 {
996 cbToTryRead = GUEST_PAGE_SIZE;
997 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
998 if (!IEM_IS_CANONICAL(GCPtrNext))
999 return iemRaiseGeneralProtectionFault0(pVCpu);
1000 }
1001 else
1002 {
1003 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1004 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); - this is allowed */
1005 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1006 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1007 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1008 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1009 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1010 if (!cbToTryRead) /* overflowed */
1011 {
1012 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1013 cbToTryRead = UINT32_MAX;
1014 /** @todo check out wrapping around the code segment. */
1015 }
1016 if (cbToTryRead < cbMin - cbLeft)
1017 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1018 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1019 }
1020
1021 /* Only read up to the end of the page, and make sure we don't read more
1022 than the opcode buffer can hold. */
1023 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1024 if (cbToTryRead > cbLeftOnPage)
1025 cbToTryRead = cbLeftOnPage;
1026 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1027 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1028/** @todo r=bird: Convert assertion into undefined opcode exception? */
1029 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1030
1031 PGMPTWALK Walk;
1032 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1033 if (RT_FAILURE(rc))
1034 {
1035 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1036#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1037 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1038 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1039#endif
1040 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1041 }
1042 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1043 {
1044 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1045#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1046 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1047 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1048#endif
1049 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1050 }
1051 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1052 {
1053 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1054#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1055 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1056 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1057#endif
1058 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1059 }
1060 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1061 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1062 /** @todo Check reserved bits and such stuff. PGM is better at doing
1063 * that, so do it when implementing the guest virtual address
1064 * TLB... */
1065
1066 /*
1067 * Read the bytes at this address.
1068 *
1069 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1070 * and since PATM should only patch the start of an instruction there
1071 * should be no need to check again here.
1072 */
1073 if (!pVCpu->iem.s.fBypassHandlers)
1074 {
1075 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1076 cbToTryRead, PGMACCESSORIGIN_IEM);
1077 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1078 { /* likely */ }
1079 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1080 {
1081 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1082 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1083 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1084 }
1085 else
1086 {
1087 Log((RT_SUCCESS(rcStrict)
1088 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1089 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1090 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1091 return rcStrict;
1092 }
1093 }
1094 else
1095 {
1096 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1097 if (RT_SUCCESS(rc))
1098 { /* likely */ }
1099 else
1100 {
1101 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1102 return rc;
1103 }
1104 }
1105 pVCpu->iem.s.cbOpcode += cbToTryRead;
1106 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1107
1108 return VINF_SUCCESS;
1109}
1110
1111#endif /* !IEM_WITH_CODE_TLB */
1112#ifndef IEM_WITH_SETJMP
1113
1114/**
1115 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1116 *
1117 * @returns Strict VBox status code.
1118 * @param pVCpu The cross context virtual CPU structure of the
1119 * calling thread.
1120 * @param pb Where to return the opcode byte.
1121 */
1122VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1123{
1124 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1125 if (rcStrict == VINF_SUCCESS)
1126 {
1127 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1128 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1129 pVCpu->iem.s.offOpcode = offOpcode + 1;
1130 }
1131 else
1132 *pb = 0;
1133 return rcStrict;
1134}
1135
1136#else /* IEM_WITH_SETJMP */
1137
1138/**
1139 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1140 *
1141 * @returns The opcode byte.
1142 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1143 */
1144uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1145{
1146# ifdef IEM_WITH_CODE_TLB
1147 uint8_t u8;
1148 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1149 return u8;
1150# else
1151 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1152 if (rcStrict == VINF_SUCCESS)
1153 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1154 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1155# endif
1156}
1157
1158#endif /* IEM_WITH_SETJMP */
1159
1160#ifndef IEM_WITH_SETJMP
1161
1162/**
1163 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1164 *
1165 * @returns Strict VBox status code.
1166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1167 * @param pu16 Where to return the opcode word.
1168 */
1169VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1170{
1171 uint8_t u8;
1172 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1173 if (rcStrict == VINF_SUCCESS)
1174 *pu16 = (int8_t)u8;
1175 return rcStrict;
1176}
1177
1178
1179/**
1180 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1181 *
1182 * @returns Strict VBox status code.
1183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1184 * @param pu32 Where to return the opcode dword.
1185 */
1186VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1187{
1188 uint8_t u8;
1189 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1190 if (rcStrict == VINF_SUCCESS)
1191 *pu32 = (int8_t)u8;
1192 return rcStrict;
1193}
1194
1195
1196/**
1197 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1198 *
1199 * @returns Strict VBox status code.
1200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1201 * @param pu64 Where to return the opcode qword.
1202 */
1203VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1204{
1205 uint8_t u8;
1206 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1207 if (rcStrict == VINF_SUCCESS)
1208 *pu64 = (int8_t)u8;
1209 return rcStrict;
1210}
1211
1212#endif /* !IEM_WITH_SETJMP */
1213
1214
1215#ifndef IEM_WITH_SETJMP
1216
1217/**
1218 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1219 *
1220 * @returns Strict VBox status code.
1221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1222 * @param pu16 Where to return the opcode word.
1223 */
1224VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1225{
1226 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1227 if (rcStrict == VINF_SUCCESS)
1228 {
1229 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1230# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1231 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1232# else
1233 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1234# endif
1235 pVCpu->iem.s.offOpcode = offOpcode + 2;
1236 }
1237 else
1238 *pu16 = 0;
1239 return rcStrict;
1240}
1241
1242#else /* IEM_WITH_SETJMP */
1243
1244/**
1245 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1246 *
1247 * @returns The opcode word.
1248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1249 */
1250uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1251{
1252# ifdef IEM_WITH_CODE_TLB
1253 uint16_t u16;
1254 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1255 return u16;
1256# else
1257 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1258 if (rcStrict == VINF_SUCCESS)
1259 {
1260 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1261 pVCpu->iem.s.offOpcode += 2;
1262# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1263 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1264# else
1265 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1266# endif
1267 }
1268 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1269# endif
1270}
1271
1272#endif /* IEM_WITH_SETJMP */
1273
1274#ifndef IEM_WITH_SETJMP
1275
1276/**
1277 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1278 *
1279 * @returns Strict VBox status code.
1280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1281 * @param pu32 Where to return the opcode double word.
1282 */
1283VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1284{
1285 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1286 if (rcStrict == VINF_SUCCESS)
1287 {
1288 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1289 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1290 pVCpu->iem.s.offOpcode = offOpcode + 2;
1291 }
1292 else
1293 *pu32 = 0;
1294 return rcStrict;
1295}
1296
1297
1298/**
1299 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1300 *
1301 * @returns Strict VBox status code.
1302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1303 * @param pu64 Where to return the opcode quad word.
1304 */
1305VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1306{
1307 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1308 if (rcStrict == VINF_SUCCESS)
1309 {
1310 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1311 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1312 pVCpu->iem.s.offOpcode = offOpcode + 2;
1313 }
1314 else
1315 *pu64 = 0;
1316 return rcStrict;
1317}
1318
1319#endif /* !IEM_WITH_SETJMP */
1320
1321#ifndef IEM_WITH_SETJMP
1322
1323/**
1324 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1325 *
1326 * @returns Strict VBox status code.
1327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1328 * @param pu32 Where to return the opcode dword.
1329 */
1330VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1331{
1332 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1333 if (rcStrict == VINF_SUCCESS)
1334 {
1335 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1336# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1337 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1338# else
1339 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1340 pVCpu->iem.s.abOpcode[offOpcode + 1],
1341 pVCpu->iem.s.abOpcode[offOpcode + 2],
1342 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1343# endif
1344 pVCpu->iem.s.offOpcode = offOpcode + 4;
1345 }
1346 else
1347 *pu32 = 0;
1348 return rcStrict;
1349}
1350
1351#else /* IEM_WITH_SETJMP */
1352
1353/**
1354 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1355 *
1356 * @returns The opcode dword.
1357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1358 */
1359uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1360{
1361# ifdef IEM_WITH_CODE_TLB
1362 uint32_t u32;
1363 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1364 return u32;
1365# else
1366 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1367 if (rcStrict == VINF_SUCCESS)
1368 {
1369 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1370 pVCpu->iem.s.offOpcode = offOpcode + 4;
1371# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1372 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1373# else
1374 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1375 pVCpu->iem.s.abOpcode[offOpcode + 1],
1376 pVCpu->iem.s.abOpcode[offOpcode + 2],
1377 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1378# endif
1379 }
1380 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1381# endif
1382}
1383
1384#endif /* IEM_WITH_SETJMP */
1385
1386#ifndef IEM_WITH_SETJMP
1387
1388/**
1389 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1390 *
1391 * @returns Strict VBox status code.
1392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1393 * @param pu64 Where to return the opcode qword.
1394 */
1395VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1396{
1397 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1398 if (rcStrict == VINF_SUCCESS)
1399 {
1400 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1401 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1402 pVCpu->iem.s.abOpcode[offOpcode + 1],
1403 pVCpu->iem.s.abOpcode[offOpcode + 2],
1404 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1405 pVCpu->iem.s.offOpcode = offOpcode + 4;
1406 }
1407 else
1408 *pu64 = 0;
1409 return rcStrict;
1410}
1411
1412
1413/**
1414 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1415 *
1416 * @returns Strict VBox status code.
1417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1418 * @param pu64 Where to return the opcode qword.
1419 */
1420VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1421{
1422 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1423 if (rcStrict == VINF_SUCCESS)
1424 {
1425 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1426 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1427 pVCpu->iem.s.abOpcode[offOpcode + 1],
1428 pVCpu->iem.s.abOpcode[offOpcode + 2],
1429 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1430 pVCpu->iem.s.offOpcode = offOpcode + 4;
1431 }
1432 else
1433 *pu64 = 0;
1434 return rcStrict;
1435}
1436
1437#endif /* !IEM_WITH_SETJMP */
1438
1439#ifndef IEM_WITH_SETJMP
1440
1441/**
1442 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1443 *
1444 * @returns Strict VBox status code.
1445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1446 * @param pu64 Where to return the opcode qword.
1447 */
1448VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1449{
1450 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1451 if (rcStrict == VINF_SUCCESS)
1452 {
1453 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1454# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1455 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1456# else
1457 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1458 pVCpu->iem.s.abOpcode[offOpcode + 1],
1459 pVCpu->iem.s.abOpcode[offOpcode + 2],
1460 pVCpu->iem.s.abOpcode[offOpcode + 3],
1461 pVCpu->iem.s.abOpcode[offOpcode + 4],
1462 pVCpu->iem.s.abOpcode[offOpcode + 5],
1463 pVCpu->iem.s.abOpcode[offOpcode + 6],
1464 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1465# endif
1466 pVCpu->iem.s.offOpcode = offOpcode + 8;
1467 }
1468 else
1469 *pu64 = 0;
1470 return rcStrict;
1471}
1472
1473#else /* IEM_WITH_SETJMP */
1474
1475/**
1476 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1477 *
1478 * @returns The opcode qword.
1479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1480 */
1481uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1482{
1483# ifdef IEM_WITH_CODE_TLB
1484 uint64_t u64;
1485 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1486 return u64;
1487# else
1488 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1489 if (rcStrict == VINF_SUCCESS)
1490 {
1491 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1492 pVCpu->iem.s.offOpcode = offOpcode + 8;
1493# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1494 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1495# else
1496 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1497 pVCpu->iem.s.abOpcode[offOpcode + 1],
1498 pVCpu->iem.s.abOpcode[offOpcode + 2],
1499 pVCpu->iem.s.abOpcode[offOpcode + 3],
1500 pVCpu->iem.s.abOpcode[offOpcode + 4],
1501 pVCpu->iem.s.abOpcode[offOpcode + 5],
1502 pVCpu->iem.s.abOpcode[offOpcode + 6],
1503 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1504# endif
1505 }
1506 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1507# endif
1508}
1509
1510#endif /* IEM_WITH_SETJMP */
1511
1512
1513
1514/** @name Misc Worker Functions.
1515 * @{
1516 */
1517
1518/**
1519 * Gets the exception class for the specified exception vector.
1520 *
1521 * @returns The class of the specified exception.
1522 * @param uVector The exception vector.
1523 */
1524static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1525{
1526 Assert(uVector <= X86_XCPT_LAST);
1527 switch (uVector)
1528 {
1529 case X86_XCPT_DE:
1530 case X86_XCPT_TS:
1531 case X86_XCPT_NP:
1532 case X86_XCPT_SS:
1533 case X86_XCPT_GP:
1534 case X86_XCPT_SX: /* AMD only */
1535 return IEMXCPTCLASS_CONTRIBUTORY;
1536
1537 case X86_XCPT_PF:
1538 case X86_XCPT_VE: /* Intel only */
1539 return IEMXCPTCLASS_PAGE_FAULT;
1540
1541 case X86_XCPT_DF:
1542 return IEMXCPTCLASS_DOUBLE_FAULT;
1543 }
1544 return IEMXCPTCLASS_BENIGN;
1545}
1546
1547
1548/**
1549 * Evaluates how to handle an exception caused during delivery of another event
1550 * (exception / interrupt).
1551 *
1552 * @returns How to handle the recursive exception.
1553 * @param pVCpu The cross context virtual CPU structure of the
1554 * calling thread.
1555 * @param fPrevFlags The flags of the previous event.
1556 * @param uPrevVector The vector of the previous event.
1557 * @param fCurFlags The flags of the current exception.
1558 * @param uCurVector The vector of the current exception.
1559 * @param pfXcptRaiseInfo Where to store additional information about the
1560 * exception condition. Optional.
1561 */
1562VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1563 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1564{
1565 /*
1566 * Only CPU exceptions can be raised while delivering other events; software-interrupt
1567 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1568 */
1569 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1570 Assert(pVCpu); RT_NOREF(pVCpu);
1571 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1572
1573 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1574 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1575 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1576 {
1577 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1578 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1579 {
1580 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1581 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1582 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1583 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1584 {
1585 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1586 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1587 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1588 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1589 uCurVector, pVCpu->cpum.GstCtx.cr2));
1590 }
1591 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1592 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1593 {
1594 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1595 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1596 }
1597 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1598 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1599 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1600 {
1601 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1602 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1603 }
1604 }
1605 else
1606 {
1607 if (uPrevVector == X86_XCPT_NMI)
1608 {
1609 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1610 if (uCurVector == X86_XCPT_PF)
1611 {
1612 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1613 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1614 }
1615 }
1616 else if ( uPrevVector == X86_XCPT_AC
1617 && uCurVector == X86_XCPT_AC)
1618 {
1619 enmRaise = IEMXCPTRAISE_CPU_HANG;
1620 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1621 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1622 }
1623 }
1624 }
1625 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1626 {
1627 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1628 if (uCurVector == X86_XCPT_PF)
1629 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1630 }
1631 else
1632 {
1633 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1634 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1635 }
1636
1637 if (pfXcptRaiseInfo)
1638 *pfXcptRaiseInfo = fRaiseInfo;
1639 return enmRaise;
1640}
1641
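/*
 * Hedged usage sketch for the helper above: how a dispatcher that hits a new
 * CPU exception while delivering a previous one might act on the verdict.
 * The wrapper name is illustrative only; the real callers live in the
 * exception dispatch and nested-guest code elsewhere in IEM.
 *
 *  static void iemXcptRecursionSketch(PVMCPUCC pVCpu, uint8_t uPrevVector, uint8_t uCurVector)
 *  {
 *      IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *      IEMXCPTRAISE    enmRaise   = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                            IEM_XCPT_FLAGS_T_CPU_XCPT, uPrevVector,
 *                                                            IEM_XCPT_FLAGS_T_CPU_XCPT, uCurVector,
 *                                                            &fRaiseInfo);
 *      switch (enmRaise)
 *      {
 *          case IEMXCPTRAISE_CURRENT_XCPT: break; // deliver uCurVector as-is.
 *          case IEMXCPTRAISE_DOUBLE_FAULT: break; // deliver #DF (error code 0) instead.
 *          case IEMXCPTRAISE_TRIPLE_FAULT: break; // see iemInitiateCpuShutdown below.
 *          case IEMXCPTRAISE_CPU_HANG:     break; // recursive #AC; the CPU hangs.
 *          default: AssertFailed();        break;
 *      }
 *      RT_NOREF(fRaiseInfo);
 *  }
 */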
1642
1643/**
1644 * Enters the CPU shutdown state initiated by a triple fault or other
1645 * unrecoverable conditions.
1646 *
1647 * @returns Strict VBox status code.
1648 * @param pVCpu The cross context virtual CPU structure of the
1649 * calling thread.
1650 */
1651static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1652{
1653 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1654 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1655
1656 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1657 {
1658 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1659 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1660 }
1661
1662 RT_NOREF(pVCpu);
1663 return VINF_EM_TRIPLE_FAULT;
1664}
1665
1666
1667/**
1668 * Validates a new SS segment.
1669 *
1670 * @returns VBox strict status code.
1671 * @param pVCpu The cross context virtual CPU structure of the
1672 * calling thread.
1673 * @param NewSS The new SS selector.
1674 * @param uCpl The CPL to load the stack for.
1675 * @param pDesc Where to return the descriptor.
1676 */
1677static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1678{
1679 /* Null selectors are not allowed (we're not called for dispatching
1680 interrupts with SS=0 in long mode). */
1681 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1682 {
1683 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1684 return iemRaiseTaskSwitchFault0(pVCpu);
1685 }
1686
1687 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1688 if ((NewSS & X86_SEL_RPL) != uCpl)
1689 {
1690 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1691 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1692 }
1693
1694 /*
1695 * Read the descriptor.
1696 */
1697 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1698 if (rcStrict != VINF_SUCCESS)
1699 return rcStrict;
1700
1701 /*
1702 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1703 */
1704 if (!pDesc->Legacy.Gen.u1DescType)
1705 {
1706 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1707 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1708 }
1709
1710 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1711 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1712 {
1713 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1714 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1715 }
1716 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1717 {
1718 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1719 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1720 }
1721
1722 /* Is it there? */
1723 /** @todo testcase: Is this checked before the canonical / limit check below? */
1724 if (!pDesc->Legacy.Gen.u1Present)
1725 {
1726 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1727 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1728 }
1729
1730 return VINF_SUCCESS;
1731}
1732
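/*
 * Standalone sketch of the descriptor checks above (plain C, illustrative
 * names): a usable new SS must be a writable data segment with
 * RPL == DPL == CPL; every failure raises #TS with the selector, except a
 * non-present segment which raises #NP. The null-selector and present-bit
 * checks done above are left out of the sketch.
 *
 *  #include <stdbool.h>
 *  #include <stdint.h>
 *
 *  static bool ssDescriptorUsable(bool fCodeOrData, uint8_t uType, uint8_t uDpl, uint8_t uRpl, uint8_t uCpl)
 *  {
 *      return fCodeOrData            // S=1: a code/data descriptor, not a system one.
 *          && !(uType & 0x8)         // type bit 3 clear: data, not code (X86_SEL_TYPE_CODE).
 *          && (uType & 0x2)          // type bit 1 set: writable (X86_SEL_TYPE_WRITE).
 *          && uRpl == uCpl           // selector RPL must equal the CPL we load the stack for...
 *          && uDpl == uCpl;          // ...and so must the descriptor DPL.
 *  }
 */
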
1733/** @} */
1734
1735
1736/** @name Raising Exceptions.
1737 *
1738 * @{
1739 */
1740
1741
1742/**
1743 * Loads the specified stack far pointer from the TSS.
1744 *
1745 * @returns VBox strict status code.
1746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1747 * @param uCpl The CPL to load the stack for.
1748 * @param pSelSS Where to return the new stack segment.
1749 * @param puEsp Where to return the new stack pointer.
1750 */
1751static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1752{
1753 VBOXSTRICTRC rcStrict;
1754 Assert(uCpl < 4);
1755
1756 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1757 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1758 {
1759 /*
1760 * 16-bit TSS (X86TSS16).
1761 */
1762 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1763 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1764 {
1765 uint32_t off = uCpl * 4 + 2;
1766 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1767 {
1768 /** @todo check actual access pattern here. */
1769 uint32_t u32Tmp = 0; /* gcc maybe... */
1770 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1771 if (rcStrict == VINF_SUCCESS)
1772 {
1773 *puEsp = RT_LOWORD(u32Tmp);
1774 *pSelSS = RT_HIWORD(u32Tmp);
1775 return VINF_SUCCESS;
1776 }
1777 }
1778 else
1779 {
1780 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1781 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1782 }
1783 break;
1784 }
1785
1786 /*
1787 * 32-bit TSS (X86TSS32).
1788 */
1789 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1790 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1791 {
1792 uint32_t off = uCpl * 8 + 4;
1793 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1794 {
1795 /** @todo check actual access pattern here. */
1796 uint64_t u64Tmp;
1797 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1798 if (rcStrict == VINF_SUCCESS)
1799 {
1800 *puEsp = u64Tmp & UINT32_MAX;
1801 *pSelSS = (RTSEL)(u64Tmp >> 32);
1802 return VINF_SUCCESS;
1803 }
1804 }
1805 else
1806 {
1807 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1808 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1809 }
1810 break;
1811 }
1812
1813 default:
1814 AssertFailed();
1815 rcStrict = VERR_IEM_IPE_4;
1816 break;
1817 }
1818
1819 *puEsp = 0; /* make gcc happy */
1820 *pSelSS = 0; /* make gcc happy */
1821 return rcStrict;
1822}
1823
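/*
 * Standalone sketch of the offset arithmetic used above (plain C, illustrative
 * names). In a 16-bit TSS the ring-N SP:SS pair starts at offset 2 and each
 * slot is 2+2 bytes; in a 32-bit TSS the ring-N ESP:SS pair starts at offset 4
 * and each slot is 4+4 bytes (the SS word occupies a full dword), which is
 * where the uCpl * 4 + 2 and uCpl * 8 + 4 expressions come from.
 *
 *  #include <stdint.h>
 *
 *  static uint32_t tss16StackSlotOffset(uint8_t uCpl) // SPn at this offset, SSn at +2.
 *  {
 *      return (uint32_t)uCpl * 4 + 2;
 *  }
 *
 *  static uint32_t tss32StackSlotOffset(uint8_t uCpl) // ESPn at this offset, SSn at +4.
 *  {
 *      return (uint32_t)uCpl * 8 + 4;
 *  }
 */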
1824
1825/**
1826 * Loads the specified stack pointer from the 64-bit TSS.
1827 *
1828 * @returns VBox strict status code.
1829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1830 * @param uCpl The CPL to load the stack for.
1831 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1832 * @param puRsp Where to return the new stack pointer.
1833 */
1834static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1835{
1836 Assert(uCpl < 4);
1837 Assert(uIst < 8);
1838 *puRsp = 0; /* make gcc happy */
1839
1840 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1841 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1842
1843 uint32_t off;
1844 if (uIst)
1845 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1846 else
1847 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1848 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1849 {
1850 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1851 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1852 }
1853
1854 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1855}
1856
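/*
 * Standalone sketch of the 64-bit TSS lookup above, assuming the standard
 * layout (RSP0..RSP2 at offset 4, IST1..IST7 at offset 36); the function name
 * is illustrative. The real code derives the same constants with
 * RT_UOFFSETOF(X86TSS64, rsp0) and RT_UOFFSETOF(X86TSS64, ist1).
 *
 *  #include <stdint.h>
 *
 *  static uint32_t tss64StackSlotOffset(uint8_t uCpl, uint8_t uIst)
 *  {
 *      if (uIst)                                  // a non-zero IST index from the IDT gate wins...
 *          return (uint32_t)(uIst - 1) * 8 + 36;  // ...IST1 lives at offset 36.
 *      return (uint32_t)uCpl * 8 + 4;             // otherwise use RSP0/1/2 starting at offset 4.
 *  }
 */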
1857
1858/**
1859 * Adjust the CPU state according to the exception being raised.
1860 *
1861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1862 * @param u8Vector The exception that has been raised.
1863 */
1864DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1865{
1866 switch (u8Vector)
1867 {
1868 case X86_XCPT_DB:
1869 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1870 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1871 break;
1872 /** @todo Read the AMD and Intel exception reference... */
1873 }
1874}
1875
1876
1877/**
1878 * Implements exceptions and interrupts for real mode.
1879 *
1880 * @returns VBox strict status code.
1881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1882 * @param cbInstr The number of bytes to offset rIP by in the return
1883 * address.
1884 * @param u8Vector The interrupt / exception vector number.
1885 * @param fFlags The flags.
1886 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1887 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1888 */
1889static VBOXSTRICTRC
1890iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1891 uint8_t cbInstr,
1892 uint8_t u8Vector,
1893 uint32_t fFlags,
1894 uint16_t uErr,
1895 uint64_t uCr2) RT_NOEXCEPT
1896{
1897 NOREF(uErr); NOREF(uCr2);
1898 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1899
1900 /*
1901 * Read the IDT entry.
1902 */
1903 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1904 {
1905 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1906 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1907 }
1908 RTFAR16 Idte;
1909 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1910 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1911 {
1912 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1913 return rcStrict;
1914 }
1915
1916 /*
1917 * Push the stack frame.
1918 */
1919 uint16_t *pu16Frame;
1920 uint64_t uNewRsp;
1921 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
1922 if (rcStrict != VINF_SUCCESS)
1923 return rcStrict;
1924
1925 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1926#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1927 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
1928 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1929 fEfl |= UINT16_C(0xf000);
1930#endif
1931 pu16Frame[2] = (uint16_t)fEfl;
1932 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1933 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1934 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1935 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1936 return rcStrict;
1937
1938 /*
1939 * Load the vector address into cs:ip and make exception specific state
1940 * adjustments.
1941 */
1942 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1943 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1944 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1945 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1946 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1947 pVCpu->cpum.GstCtx.rip = Idte.off;
1948 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1949 IEMMISC_SET_EFL(pVCpu, fEfl);
1950
1951 /** @todo do we actually do this in real mode? */
1952 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1953 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1954
1955 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1956}
1957
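/*
 * Standalone sketch of the real-mode dispatch above (plain C, illustrative
 * names). The IVT entry for a vector is the four bytes at IDTR.base + vector * 4,
 * offset word first then segment word (the RTFAR16 fetch), and the 6-byte
 * frame pushed on the stack holds IP, CS and FLAGS from the lowest address up
 * (pu16Frame[0..2] above).
 *
 *  #include <stdint.h>
 *
 *  static uint32_t ivtEntryAddress(uint32_t uIdtBase, uint8_t bVector)
 *  {
 *      return uIdtBase + (uint32_t)bVector * 4;   // 4 bytes per entry: IP then CS.
 *  }
 *
 *  typedef struct RMXCPTFRAME
 *  {
 *      uint16_t uRetIp;    // return IP (advanced past the instruction for software interrupts).
 *      uint16_t uRetCs;    // return CS.
 *      uint16_t uFlags;    // FLAGS as they were before IF/TF/AC are cleared.
 *  } RMXCPTFRAME;
 */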
1958
1959/**
1960 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1961 *
1962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1963 * @param pSReg Pointer to the segment register.
1964 */
1965DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1966{
1967 pSReg->Sel = 0;
1968 pSReg->ValidSel = 0;
1969 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1970 {
1971 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
1972 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1973 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1974 }
1975 else
1976 {
1977 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1978 /** @todo check this on AMD-V */
1979 pSReg->u64Base = 0;
1980 pSReg->u32Limit = 0;
1981 }
1982}
1983
1984
1985/**
1986 * Loads a segment selector during a task switch in V8086 mode.
1987 *
1988 * @param pSReg Pointer to the segment register.
1989 * @param uSel The selector value to load.
1990 */
1991DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1992{
1993 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1994 pSReg->Sel = uSel;
1995 pSReg->ValidSel = uSel;
1996 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1997 pSReg->u64Base = uSel << 4;
1998 pSReg->u32Limit = 0xffff;
1999 pSReg->Attr.u = 0xf3;
2000}
2001
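/*
 * The fixed values used above follow the real-mode/V86 segment rules: the base
 * is simply the selector shifted left by four, the limit is 64K - 1, and 0xf3
 * decodes to present, DPL=3, S=1, read/write accessed data. A tiny standalone
 * sketch of the base calculation (illustrative name):
 *
 *  #include <stdint.h>
 *
 *  static uint32_t v86SegBase(uint16_t uSel)
 *  {
 *      return (uint32_t)uSel << 4;   // e.g. selector 0xb800 -> base 0xb8000.
 *  }
 */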
2002
2003/**
2004 * Loads a segment selector during a task switch in protected mode.
2005 *
2006 * In this task switch scenario, we would throw \#TS exceptions rather than
2007 * \#GPs.
2008 *
2009 * @returns VBox strict status code.
2010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2011 * @param pSReg Pointer to the segment register.
2012 * @param uSel The new selector value.
2013 *
2014 * @remarks This does _not_ handle CS or SS.
2015 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
2016 */
2017static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2018{
2019 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2020
2021 /* Null data selector. */
2022 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2023 {
2024 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2025 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2026 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2027 return VINF_SUCCESS;
2028 }
2029
2030 /* Fetch the descriptor. */
2031 IEMSELDESC Desc;
2032 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2033 if (rcStrict != VINF_SUCCESS)
2034 {
2035 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2036 VBOXSTRICTRC_VAL(rcStrict)));
2037 return rcStrict;
2038 }
2039
2040 /* Must be a data segment or readable code segment. */
2041 if ( !Desc.Legacy.Gen.u1DescType
2042 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2043 {
2044 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2045 Desc.Legacy.Gen.u4Type));
2046 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2047 }
2048
2049 /* Check privileges for data segments and non-conforming code segments. */
2050 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2051 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2052 {
2053 /* The RPL and the new CPL must be less than or equal to the DPL. */
2054 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2055 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2056 {
2057 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2058 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2059 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2060 }
2061 }
2062
2063 /* Is it there? */
2064 if (!Desc.Legacy.Gen.u1Present)
2065 {
2066 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2067 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2068 }
2069
2070 /* The base and limit. */
2071 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2072 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2073
2074 /*
2075 * Ok, everything checked out fine. Now set the accessed bit before
2076 * committing the result into the registers.
2077 */
2078 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2079 {
2080 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2081 if (rcStrict != VINF_SUCCESS)
2082 return rcStrict;
2083 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2084 }
2085
2086 /* Commit */
2087 pSReg->Sel = uSel;
2088 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2089 pSReg->u32Limit = cbLimit;
2090 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2091 pSReg->ValidSel = uSel;
2092 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2093 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2094 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2095
2096 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2097 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2098 return VINF_SUCCESS;
2099}
2100
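/*
 * Standalone sketch of the privilege rule enforced above for data and
 * non-conforming code selectors loaded during a task switch: both the selector
 * RPL and the new CPL must be numerically <= the descriptor DPL, otherwise
 * #TS(sel) is raised; conforming code segments skip the check. Plain C with
 * illustrative names:
 *
 *  #include <stdbool.h>
 *  #include <stdint.h>
 *
 *  static bool taskSwitchDataSelPrivOk(uint8_t uRpl, uint8_t uCpl, uint8_t uDpl, bool fConformingCode)
 *  {
 *      if (fConformingCode)
 *          return true;                       // conforming code: no RPL/CPL vs DPL check here.
 *      return uRpl <= uDpl && uCpl <= uDpl;
 *  }
 */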
2101
2102/**
2103 * Performs a task switch.
2104 *
2105 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2106 * caller is responsible for performing the necessary checks (like DPL, TSS
2107 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2108 * reference for JMP, CALL, IRET.
2109 *
2110 * If the task switch is due to a software interrupt or hardware exception,
2111 * the caller is responsible for validating the TSS selector and descriptor. See
2112 * Intel Instruction reference for INT n.
2113 *
2114 * @returns VBox strict status code.
2115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2116 * @param enmTaskSwitch The cause of the task switch.
2117 * @param uNextEip The EIP effective after the task switch.
2118 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2119 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2120 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2121 * @param SelTSS The TSS selector of the new task.
2122 * @param pNewDescTSS Pointer to the new TSS descriptor.
2123 */
2124VBOXSTRICTRC
2125iemTaskSwitch(PVMCPUCC pVCpu,
2126 IEMTASKSWITCH enmTaskSwitch,
2127 uint32_t uNextEip,
2128 uint32_t fFlags,
2129 uint16_t uErr,
2130 uint64_t uCr2,
2131 RTSEL SelTSS,
2132 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2133{
2134 Assert(!IEM_IS_REAL_MODE(pVCpu));
2135 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2136 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2137
2138 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2139 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2140 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2141 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2142 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2143
2144 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2145 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2146
2147 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2148 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2149
2150 /* Update CR2 in case it's a page-fault. */
2151 /** @todo This should probably be done much earlier in IEM/PGM. See
2152 * @bugref{5653#c49}. */
2153 if (fFlags & IEM_XCPT_FLAGS_CR2)
2154 pVCpu->cpum.GstCtx.cr2 = uCr2;
2155
2156 /*
2157 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2158 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2159 */
2160 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2161 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2162 if (uNewTSSLimit < uNewTSSLimitMin)
2163 {
2164 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2165 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2166 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2167 }
2168
2169 /*
2170 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2171 * The new TSS must have been read and validated (DPL, limits etc.) before a
2172 * task-switch VM-exit commences.
2173 *
2174 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2175 */
2176 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2177 {
2178 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2179 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2180 }
2181
2182 /*
2183 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2184 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2185 */
2186 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2187 {
2188 uint32_t const uExitInfo1 = SelTSS;
2189 uint32_t uExitInfo2 = uErr;
2190 switch (enmTaskSwitch)
2191 {
2192 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2193 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2194 default: break;
2195 }
2196 if (fFlags & IEM_XCPT_FLAGS_ERR)
2197 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2198 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2199 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2200
2201 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2202 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2203 RT_NOREF2(uExitInfo1, uExitInfo2);
2204 }
2205
2206 /*
2207 * Check the current TSS limit. The last field written to the current TSS during the
2208 * task switch is 2 bytes at offset 0x5C (32-bit TSS) and 2 bytes at offset 0x28 (16-bit TSS).
2209 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2210 *
2211 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2212 * end up with smaller than "legal" TSS limits.
2213 */
2214 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2215 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2216 if (uCurTSSLimit < uCurTSSLimitMin)
2217 {
2218 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2219 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2220 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2221 }
2222
2223 /*
2224 * Verify that the new TSS can be accessed and map it. Map only the required contents
2225 * and not the entire TSS.
2226 */
2227 void *pvNewTSS;
2228 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2229 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2230 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2231 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2232 * not perform correct translation if this happens. See Intel spec. 7.2.1
2233 * "Task-State Segment". */
2234 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2235 if (rcStrict != VINF_SUCCESS)
2236 {
2237 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2238 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2239 return rcStrict;
2240 }
2241
2242 /*
2243 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2244 */
2245 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2246 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2247 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2248 {
2249 PX86DESC pDescCurTSS;
2250 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2251 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2252 if (rcStrict != VINF_SUCCESS)
2253 {
2254 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2255 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2256 return rcStrict;
2257 }
2258
2259 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2260 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2261 if (rcStrict != VINF_SUCCESS)
2262 {
2263 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2264 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2265 return rcStrict;
2266 }
2267
2268 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2269 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2270 {
2271 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2272 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2273 fEFlags &= ~X86_EFL_NT;
2274 }
2275 }
2276
2277 /*
2278 * Save the CPU state into the current TSS.
2279 */
2280 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2281 if (GCPtrNewTSS == GCPtrCurTSS)
2282 {
2283 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2284 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2285 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2286 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2287 pVCpu->cpum.GstCtx.ldtr.Sel));
2288 }
2289 if (fIsNewTSS386)
2290 {
2291 /*
2292 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2293 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2294 */
2295 void *pvCurTSS32;
2296 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2297 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2298 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2299 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2300 if (rcStrict != VINF_SUCCESS)
2301 {
2302 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2303 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2304 return rcStrict;
2305 }
2306
2307 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2308 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2309 pCurTSS32->eip = uNextEip;
2310 pCurTSS32->eflags = fEFlags;
2311 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2312 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2313 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2314 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2315 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2316 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2317 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2318 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2319 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2320 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2321 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2322 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2323 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2324 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2325
2326 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2327 if (rcStrict != VINF_SUCCESS)
2328 {
2329 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2330 VBOXSTRICTRC_VAL(rcStrict)));
2331 return rcStrict;
2332 }
2333 }
2334 else
2335 {
2336 /*
2337 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2338 */
2339 void *pvCurTSS16;
2340 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2341 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2342 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2343 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2344 if (rcStrict != VINF_SUCCESS)
2345 {
2346 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2347 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2348 return rcStrict;
2349 }
2350
2351 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2352 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2353 pCurTSS16->ip = uNextEip;
2354 pCurTSS16->flags = (uint16_t)fEFlags;
2355 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2356 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2357 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2358 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2359 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2360 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2361 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2362 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2363 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2364 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2365 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2366 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2367
2368 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2369 if (rcStrict != VINF_SUCCESS)
2370 {
2371 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2372 VBOXSTRICTRC_VAL(rcStrict)));
2373 return rcStrict;
2374 }
2375 }
2376
2377 /*
2378 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2379 */
2380 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2381 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2382 {
2383 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2384 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2385 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2386 }
2387
2388 /*
2389 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2390 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2391 */
2392 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2393 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2394 bool fNewDebugTrap;
2395 if (fIsNewTSS386)
2396 {
2397 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2398 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2399 uNewEip = pNewTSS32->eip;
2400 uNewEflags = pNewTSS32->eflags;
2401 uNewEax = pNewTSS32->eax;
2402 uNewEcx = pNewTSS32->ecx;
2403 uNewEdx = pNewTSS32->edx;
2404 uNewEbx = pNewTSS32->ebx;
2405 uNewEsp = pNewTSS32->esp;
2406 uNewEbp = pNewTSS32->ebp;
2407 uNewEsi = pNewTSS32->esi;
2408 uNewEdi = pNewTSS32->edi;
2409 uNewES = pNewTSS32->es;
2410 uNewCS = pNewTSS32->cs;
2411 uNewSS = pNewTSS32->ss;
2412 uNewDS = pNewTSS32->ds;
2413 uNewFS = pNewTSS32->fs;
2414 uNewGS = pNewTSS32->gs;
2415 uNewLdt = pNewTSS32->selLdt;
2416 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2417 }
2418 else
2419 {
2420 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2421 uNewCr3 = 0;
2422 uNewEip = pNewTSS16->ip;
2423 uNewEflags = pNewTSS16->flags;
2424 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2425 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2426 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2427 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2428 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2429 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2430 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2431 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2432 uNewES = pNewTSS16->es;
2433 uNewCS = pNewTSS16->cs;
2434 uNewSS = pNewTSS16->ss;
2435 uNewDS = pNewTSS16->ds;
2436 uNewFS = 0;
2437 uNewGS = 0;
2438 uNewLdt = pNewTSS16->selLdt;
2439 fNewDebugTrap = false;
2440 }
2441
2442 if (GCPtrNewTSS == GCPtrCurTSS)
2443 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2444 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2445
2446 /*
2447 * We're done accessing the new TSS.
2448 */
2449 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2450 if (rcStrict != VINF_SUCCESS)
2451 {
2452 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2453 return rcStrict;
2454 }
2455
2456 /*
2457 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2458 */
2459 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2460 {
2461 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2462 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2463 if (rcStrict != VINF_SUCCESS)
2464 {
2465 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2466 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2467 return rcStrict;
2468 }
2469
2470 /* Check that the descriptor indicates the new TSS is available (not busy). */
2471 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2472 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2473 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2474
2475 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2476 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2477 if (rcStrict != VINF_SUCCESS)
2478 {
2479 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2480 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2481 return rcStrict;
2482 }
2483 }
2484
2485 /*
2486 * From this point on, we're technically in the new task. Exceptions raised from here on
2487 * are deferred until the task switch has completed, but are delivered before any instruction in the new task executes.
2488 */
2489 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2490 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2491 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2492 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2493 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2494 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2495 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2496
2497 /* Set the busy bit in TR. */
2498 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2499
2500 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2501 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2502 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2503 {
2504 uNewEflags |= X86_EFL_NT;
2505 }
2506
2507 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2508 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2509 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2510
2511 pVCpu->cpum.GstCtx.eip = uNewEip;
2512 pVCpu->cpum.GstCtx.eax = uNewEax;
2513 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2514 pVCpu->cpum.GstCtx.edx = uNewEdx;
2515 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2516 pVCpu->cpum.GstCtx.esp = uNewEsp;
2517 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2518 pVCpu->cpum.GstCtx.esi = uNewEsi;
2519 pVCpu->cpum.GstCtx.edi = uNewEdi;
2520
2521 uNewEflags &= X86_EFL_LIVE_MASK;
2522 uNewEflags |= X86_EFL_RA1_MASK;
2523 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2524
2525 /*
2526 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2527 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2528 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2529 */
2530 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2531 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2532
2533 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2534 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2535
2536 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2537 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2538
2539 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2540 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2541
2542 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2543 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2544
2545 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2546 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2547 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2548
2549 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2550 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2551 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2552 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2553
2554 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2555 {
2556 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2557 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2558 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2559 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2560 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2561 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2562 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2563 }
2564
2565 /*
2566 * Switch CR3 for the new task.
2567 */
2568 if ( fIsNewTSS386
2569 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2570 {
2571 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2572 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2573 AssertRCSuccessReturn(rc, rc);
2574
2575 /* Inform PGM. */
2576 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2577 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2578 AssertRCReturn(rc, rc);
2579 /* ignore informational status codes */
2580
2581 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2582 }
2583
2584 /*
2585 * Switch LDTR for the new task.
2586 */
2587 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2588 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2589 else
2590 {
2591 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2592
2593 IEMSELDESC DescNewLdt;
2594 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2595 if (rcStrict != VINF_SUCCESS)
2596 {
2597 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2598 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2599 return rcStrict;
2600 }
2601 if ( !DescNewLdt.Legacy.Gen.u1Present
2602 || DescNewLdt.Legacy.Gen.u1DescType
2603 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2604 {
2605 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2606 uNewLdt, DescNewLdt.Legacy.u));
2607 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2608 }
2609
2610 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2611 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2612 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2613 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2614 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2615 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2616 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2617 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2618 }
2619
2620 IEMSELDESC DescSS;
2621 if (IEM_IS_V86_MODE(pVCpu))
2622 {
2623 pVCpu->iem.s.uCpl = 3;
2624 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2625 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2626 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2627 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2628 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2629 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2630
2631 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2632 DescSS.Legacy.u = 0;
2633 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2634 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2635 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2636 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2637 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2638 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2639 DescSS.Legacy.Gen.u2Dpl = 3;
2640 }
2641 else
2642 {
2643 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2644
2645 /*
2646 * Load the stack segment for the new task.
2647 */
2648 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2649 {
2650 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2651 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2652 }
2653
2654 /* Fetch the descriptor. */
2655 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2656 if (rcStrict != VINF_SUCCESS)
2657 {
2658 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2659 VBOXSTRICTRC_VAL(rcStrict)));
2660 return rcStrict;
2661 }
2662
2663 /* SS must be a data segment and writable. */
2664 if ( !DescSS.Legacy.Gen.u1DescType
2665 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2666 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2667 {
2668 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2669 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2670 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2671 }
2672
2673 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2674 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2675 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2676 {
2677 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2678 uNewCpl));
2679 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2680 }
2681
2682 /* Is it there? */
2683 if (!DescSS.Legacy.Gen.u1Present)
2684 {
2685 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2686 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2687 }
2688
2689 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2690 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2691
2692 /* Set the accessed bit before committing the result into SS. */
2693 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2694 {
2695 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2696 if (rcStrict != VINF_SUCCESS)
2697 return rcStrict;
2698 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2699 }
2700
2701 /* Commit SS. */
2702 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2703 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2704 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2705 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2706 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2707 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2708 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2709
2710 /* CPL has changed, update IEM before loading rest of segments. */
2711 pVCpu->iem.s.uCpl = uNewCpl;
2712
2713 /*
2714 * Load the data segments for the new task.
2715 */
2716 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2717 if (rcStrict != VINF_SUCCESS)
2718 return rcStrict;
2719 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2720 if (rcStrict != VINF_SUCCESS)
2721 return rcStrict;
2722 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2723 if (rcStrict != VINF_SUCCESS)
2724 return rcStrict;
2725 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2726 if (rcStrict != VINF_SUCCESS)
2727 return rcStrict;
2728
2729 /*
2730 * Load the code segment for the new task.
2731 */
2732 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2733 {
2734 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2735 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2736 }
2737
2738 /* Fetch the descriptor. */
2739 IEMSELDESC DescCS;
2740 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2741 if (rcStrict != VINF_SUCCESS)
2742 {
2743 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2744 return rcStrict;
2745 }
2746
2747 /* CS must be a code segment. */
2748 if ( !DescCS.Legacy.Gen.u1DescType
2749 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2750 {
2751 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2752 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2753 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2754 }
2755
2756 /* For conforming CS, DPL must be less than or equal to the RPL. */
2757 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2758 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2759 {
2760 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2761 DescCS.Legacy.Gen.u2Dpl));
2762 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2763 }
2764
2765 /* For non-conforming CS, DPL must match RPL. */
2766 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2767 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2768 {
2769 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2770 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2771 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2772 }
2773
2774 /* Is it there? */
2775 if (!DescCS.Legacy.Gen.u1Present)
2776 {
2777 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2778 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2779 }
2780
2781 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2782 u64Base = X86DESC_BASE(&DescCS.Legacy);
2783
2784 /* Set the accessed bit before committing the result into CS. */
2785 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2786 {
2787 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2788 if (rcStrict != VINF_SUCCESS)
2789 return rcStrict;
2790 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2791 }
2792
2793 /* Commit CS. */
2794 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2795 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2796 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2797 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2798 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2799 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2800 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2801 }
2802
2803 /** @todo Debug trap. */
2804 if (fIsNewTSS386 && fNewDebugTrap)
2805 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2806
2807 /*
2808 * Construct the error code masks based on what caused this task switch.
2809 * See Intel Instruction reference for INT.
2810 */
2811 uint16_t uExt;
2812 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2813 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2814 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2815 {
2816 uExt = 1;
2817 }
2818 else
2819 uExt = 0;
2820
2821 /*
2822 * Push any error code on to the new stack.
2823 */
2824 if (fFlags & IEM_XCPT_FLAGS_ERR)
2825 {
2826 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2827 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2828 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2829
2830 /* Check that there is sufficient space on the stack. */
2831 /** @todo Factor out segment limit checking for normal/expand down segments
2832 * into a separate function. */
2833 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2834 {
2835 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2836 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2837 {
2838 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2839 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2840 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2841 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2842 }
2843 }
2844 else
2845 {
2846 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2847 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2848 {
2849 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2850 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2851 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2852 }
2853 }
2854
2855
2856 if (fIsNewTSS386)
2857 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2858 else
2859 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2860 if (rcStrict != VINF_SUCCESS)
2861 {
2862 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2863 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2864 return rcStrict;
2865 }
2866 }
2867
2868 /* Check the new EIP against the new CS limit. */
2869 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2870 {
2871 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2872 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2873 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2874 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2875 }
2876
2877 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2878 pVCpu->cpum.GstCtx.ss.Sel));
2879 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2880}
2881
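/*
 * A condensed, standalone sketch of the busy-bit / nested-task bookkeeping
 * performed above, keyed on what triggered the switch (enum and struct names
 * are illustrative, mirroring IEMTASKSWITCH): JMP and IRET clear the outgoing
 * descriptor's busy bit, everything but IRET marks the incoming one busy, and
 * CALL or INT/exception additionally link back via TSS.selPrev and EFLAGS.NT.
 *
 *  #include <stdbool.h>
 *
 *  typedef enum { TS_JUMP, TS_CALL, TS_INT_XCPT, TS_IRET } TASKSWITCHSRC;
 *
 *  typedef struct TASKSWITCHFX
 *  {
 *      bool fClearOldBusy;   // clear the busy bit in the outgoing TSS descriptor.
 *      bool fSetNewBusy;     // set the busy bit in the incoming TSS descriptor.
 *      bool fLinkPrevTask;   // write old TR into new TSS.selPrev and set EFLAGS.NT.
 *  } TASKSWITCHFX;
 *
 *  static TASKSWITCHFX taskSwitchEffects(TASKSWITCHSRC enmSrc)
 *  {
 *      TASKSWITCHFX Fx;
 *      Fx.fClearOldBusy = enmSrc == TS_JUMP || enmSrc == TS_IRET;
 *      Fx.fSetNewBusy   = enmSrc != TS_IRET;
 *      Fx.fLinkPrevTask = enmSrc == TS_CALL || enmSrc == TS_INT_XCPT;
 *      return Fx;
 *  }
 */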
2882
2883/**
2884 * Implements exceptions and interrupts for protected mode.
2885 *
2886 * @returns VBox strict status code.
2887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2888 * @param cbInstr The number of bytes to offset rIP by in the return
2889 * address.
2890 * @param u8Vector The interrupt / exception vector number.
2891 * @param fFlags The flags.
2892 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2893 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2894 */
2895static VBOXSTRICTRC
2896iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2897 uint8_t cbInstr,
2898 uint8_t u8Vector,
2899 uint32_t fFlags,
2900 uint16_t uErr,
2901 uint64_t uCr2) RT_NOEXCEPT
2902{
2903 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2904
2905 /*
2906 * Read the IDT entry.
2907 */
2908 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2909 {
2910 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2911 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2912 }
2913 X86DESC Idte;
2914 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2915 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2916 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2917 {
2918 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2919 return rcStrict;
2920 }
2921 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2922 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2923 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2924
2925 /*
2926 * Check the descriptor type, DPL and such.
2927 * ASSUMES this is done in the same order as described for call-gate calls.
2928 */
2929 if (Idte.Gate.u1DescType)
2930 {
2931 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2932 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2933 }
2934 bool fTaskGate = false;
2935 uint8_t f32BitGate = true;
2936 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2937 switch (Idte.Gate.u4Type)
2938 {
2939 case X86_SEL_TYPE_SYS_UNDEFINED:
2940 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2941 case X86_SEL_TYPE_SYS_LDT:
2942 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2943 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2944 case X86_SEL_TYPE_SYS_UNDEFINED2:
2945 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2946 case X86_SEL_TYPE_SYS_UNDEFINED3:
2947 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2948 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2949 case X86_SEL_TYPE_SYS_UNDEFINED4:
2950 {
2951 /** @todo check what actually happens when the type is wrong...
2952 * esp. call gates. */
2953 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2954 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2955 }
2956
2957 case X86_SEL_TYPE_SYS_286_INT_GATE:
2958 f32BitGate = false;
2959 RT_FALL_THRU();
2960 case X86_SEL_TYPE_SYS_386_INT_GATE:
2961 fEflToClear |= X86_EFL_IF;
2962 break;
2963
2964 case X86_SEL_TYPE_SYS_TASK_GATE:
2965 fTaskGate = true;
2966#ifndef IEM_IMPLEMENTS_TASKSWITCH
2967 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2968#endif
2969 break;
2970
2971 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2972 f32BitGate = false; RT_FALL_THRU();
2973 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2974 break;
2975
2976 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2977 }
2978
2979 /* Check DPL against CPL if applicable. */
2980 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2981 {
2982 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2983 {
2984 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2985 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2986 }
2987 }
2988
2989 /* Is it there? */
2990 if (!Idte.Gate.u1Present)
2991 {
2992 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2993 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2994 }
2995
2996 /* Is it a task-gate? */
2997 if (fTaskGate)
2998 {
2999 /*
3000 * Construct the error code masks based on what caused this task switch.
3001 * See Intel Instruction reference for INT.
3002 */
3003 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3004 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3005 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3006 RTSEL SelTSS = Idte.Gate.u16Sel;
3007
3008 /*
3009 * Fetch the TSS descriptor in the GDT.
3010 */
3011 IEMSELDESC DescTSS;
3012 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3013 if (rcStrict != VINF_SUCCESS)
3014 {
3015 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3016 VBOXSTRICTRC_VAL(rcStrict)));
3017 return rcStrict;
3018 }
3019
3020 /* The TSS descriptor must be a system segment and be available (not busy). */
3021 if ( DescTSS.Legacy.Gen.u1DescType
3022 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3023 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3024 {
3025 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3026 u8Vector, SelTSS, DescTSS.Legacy.au64));
3027 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3028 }
3029
3030 /* The TSS must be present. */
3031 if (!DescTSS.Legacy.Gen.u1Present)
3032 {
3033 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3034 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3035 }
3036
3037 /* Do the actual task switch. */
3038 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3039 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3040 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3041 }
3042
3043 /* A null CS is bad. */
3044 RTSEL NewCS = Idte.Gate.u16Sel;
3045 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3046 {
3047 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3048 return iemRaiseGeneralProtectionFault0(pVCpu);
3049 }
3050
3051 /* Fetch the descriptor for the new CS. */
3052 IEMSELDESC DescCS;
3053 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3054 if (rcStrict != VINF_SUCCESS)
3055 {
3056 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3057 return rcStrict;
3058 }
3059
3060 /* Must be a code segment. */
3061 if (!DescCS.Legacy.Gen.u1DescType)
3062 {
3063 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3064 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3065 }
3066 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3067 {
3068 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3069 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3070 }
3071
3072 /* Don't allow lowering the privilege level. */
3073 /** @todo Does the lowering of privileges apply to software interrupts
3074 * only? This has bearings on the more-privileged or
3075 * same-privilege stack behavior further down. A testcase would
3076 * be nice. */
3077 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3078 {
3079 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3080 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3081 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3082 }
3083
3084 /* Make sure the selector is present. */
3085 if (!DescCS.Legacy.Gen.u1Present)
3086 {
3087 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3088 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3089 }
3090
3091 /* Check the new EIP against the new CS limit. */
3092 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3093 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3094 ? Idte.Gate.u16OffsetLow
3095 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
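/* Note: 286 interrupt/trap gates only carry a 16-bit offset, so the high offset
   word is ignored for them; 386 gates use both offset words. */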
3096 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3097 if (uNewEip > cbLimitCS)
3098 {
3099 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3100 u8Vector, uNewEip, cbLimitCS, NewCS));
3101 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3102 }
3103 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3104
3105 /* Calc the flag image to push. */
3106 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3107 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3108 fEfl &= ~X86_EFL_RF;
3109 else
3110 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3111
3112 /* From V8086 mode only go to CPL 0. */
3113 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3114 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3115 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3116 {
3117 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3118 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3119 }
3120
3121 /*
3122 * If the privilege level changes, we need to get a new stack from the TSS.
3123 * This in turn means validating the new SS and ESP...
3124 */
3125 if (uNewCpl != pVCpu->iem.s.uCpl)
3126 {
3127 RTSEL NewSS;
3128 uint32_t uNewEsp;
3129 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3130 if (rcStrict != VINF_SUCCESS)
3131 return rcStrict;
3132
3133 IEMSELDESC DescSS;
3134 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3135 if (rcStrict != VINF_SUCCESS)
3136 return rcStrict;
3137 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3138 if (!DescSS.Legacy.Gen.u1DefBig)
3139 {
3140 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3141 uNewEsp = (uint16_t)uNewEsp;
3142 }
3143
3144 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3145
3146 /* Check that there is sufficient space for the stack frame. */
3147 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3148 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3149 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3150 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
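/* Note: the frame holds EIP, CS, EFLAGS, old ESP and SS (5 entries) plus an optional
   error code, and 4 more entries (ES, DS, FS, GS) when coming from V8086 mode;
   entries are 2 bytes wide for a 16-bit gate and the f32BitGate shift doubles
   that for a 32-bit gate. */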
3151
3152 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3153 {
3154 if ( uNewEsp - 1 > cbLimitSS
3155 || uNewEsp < cbStackFrame)
3156 {
3157 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3158 u8Vector, NewSS, uNewEsp, cbStackFrame));
3159 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3160 }
3161 }
3162 else
3163 {
3164 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3165 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3166 {
3167 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3168 u8Vector, NewSS, uNewEsp, cbStackFrame));
3169 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3170 }
3171 }
3172
3173 /*
3174 * Start making changes.
3175 */
3176
3177 /* Set the new CPL so that stack accesses use it. */
3178 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3179 pVCpu->iem.s.uCpl = uNewCpl;
3180
3181 /* Create the stack frame. */
3182 RTPTRUNION uStackFrame;
3183 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3184 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3185 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3186 if (rcStrict != VINF_SUCCESS)
3187 return rcStrict;
3188 void * const pvStackFrame = uStackFrame.pv;
3189 if (f32BitGate)
3190 {
3191 if (fFlags & IEM_XCPT_FLAGS_ERR)
3192 *uStackFrame.pu32++ = uErr;
3193 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3194 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3195 uStackFrame.pu32[2] = fEfl;
3196 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3197 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3198 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3199 if (fEfl & X86_EFL_VM)
3200 {
3201 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3202 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3203 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3204 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3205 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3206 }
3207 }
3208 else
3209 {
3210 if (fFlags & IEM_XCPT_FLAGS_ERR)
3211 *uStackFrame.pu16++ = uErr;
3212 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3213 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3214 uStackFrame.pu16[2] = fEfl;
3215 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3216 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3217 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3218 if (fEfl & X86_EFL_VM)
3219 {
3220 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3221 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3222 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3223 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3224 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3225 }
3226 }
3227 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3228 if (rcStrict != VINF_SUCCESS)
3229 return rcStrict;
3230
3231 /* Mark the selectors 'accessed' (hope this is the correct time). */
3232 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3233 * after pushing the stack frame? (Write protect the gdt + stack to
3234 * find out.) */
3235 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3236 {
3237 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3238 if (rcStrict != VINF_SUCCESS)
3239 return rcStrict;
3240 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3241 }
3242
3243 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3244 {
3245 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3246 if (rcStrict != VINF_SUCCESS)
3247 return rcStrict;
3248 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3249 }
3250
3251 /*
3252 * Start committing the register changes (joins with the DPL=CPL branch).
3253 */
3254 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3255 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3256 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3257 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3258 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3259 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3260 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3261 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3262 * SP is loaded).
3263 * Need to check the other combinations too:
3264 * - 16-bit TSS, 32-bit handler
3265 * - 32-bit TSS, 16-bit handler */
3266 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3267 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3268 else
3269 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3270
3271 if (fEfl & X86_EFL_VM)
3272 {
3273 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3274 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3275 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3276 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3277 }
3278 }
3279 /*
3280 * Same privilege, no stack change and smaller stack frame.
3281 */
3282 else
3283 {
3284 uint64_t uNewRsp;
3285 RTPTRUNION uStackFrame;
3286 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
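/* Note: with no privilege change only EIP, CS and EFLAGS (plus an optional error
   code) are pushed, i.e. 6 or 8 bytes for a 16-bit gate and twice that for a
   32-bit gate. */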
3287 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3288 if (rcStrict != VINF_SUCCESS)
3289 return rcStrict;
3290 void * const pvStackFrame = uStackFrame.pv;
3291
3292 if (f32BitGate)
3293 {
3294 if (fFlags & IEM_XCPT_FLAGS_ERR)
3295 *uStackFrame.pu32++ = uErr;
3296 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3297 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3298 uStackFrame.pu32[2] = fEfl;
3299 }
3300 else
3301 {
3302 if (fFlags & IEM_XCPT_FLAGS_ERR)
3303 *uStackFrame.pu16++ = uErr;
3304 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3305 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3306 uStackFrame.pu16[2] = fEfl;
3307 }
3308 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3309 if (rcStrict != VINF_SUCCESS)
3310 return rcStrict;
3311
3312 /* Mark the CS selector as 'accessed'. */
3313 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3314 {
3315 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3316 if (rcStrict != VINF_SUCCESS)
3317 return rcStrict;
3318 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3319 }
3320
3321 /*
3322 * Start committing the register changes (joins with the other branch).
3323 */
3324 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3325 }
3326
3327 /* ... register committing continues. */
3328 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3329 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3330 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3331 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3332 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3333 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3334
3335 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3336 fEfl &= ~fEflToClear;
3337 IEMMISC_SET_EFL(pVCpu, fEfl);
3338
3339 if (fFlags & IEM_XCPT_FLAGS_CR2)
3340 pVCpu->cpum.GstCtx.cr2 = uCr2;
3341
3342 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3343 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3344
3345 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3346}
3347
3348
3349/**
3350 * Implements exceptions and interrupts for long mode.
3351 *
3352 * @returns VBox strict status code.
3353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3354 * @param cbInstr The number of bytes to offset rIP by in the return
3355 * address.
3356 * @param u8Vector The interrupt / exception vector number.
3357 * @param fFlags The flags.
3358 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3359 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3360 */
3361static VBOXSTRICTRC
3362iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3363 uint8_t cbInstr,
3364 uint8_t u8Vector,
3365 uint32_t fFlags,
3366 uint16_t uErr,
3367 uint64_t uCr2) RT_NOEXCEPT
3368{
3369 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3370
3371 /*
3372 * Read the IDT entry.
3373 */
3374 uint16_t offIdt = (uint16_t)u8Vector << 4;
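/* Note: long-mode IDT entries are 16 bytes, hence vector * 16 and the two 8-byte
   fetches below. */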
3375 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3376 {
3377 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3378 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3379 }
3380 X86DESC64 Idte;
3381#ifdef _MSC_VER /* Shut up silly compiler warning. */
3382 Idte.au64[0] = 0;
3383 Idte.au64[1] = 0;
3384#endif
3385 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3386 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3387 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3388 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3389 {
3390 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3391 return rcStrict;
3392 }
3393 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3394 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3395 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3396
3397 /*
3398 * Check the descriptor type, DPL and such.
3399 * ASSUMES this is done in the same order as described for call-gate calls.
3400 */
3401 if (Idte.Gate.u1DescType)
3402 {
3403 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3404 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3405 }
3406 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3407 switch (Idte.Gate.u4Type)
3408 {
3409 case AMD64_SEL_TYPE_SYS_INT_GATE:
3410 fEflToClear |= X86_EFL_IF;
3411 break;
3412 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3413 break;
3414
3415 default:
3416 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3417 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3418 }
3419
3420 /* Check DPL against CPL if applicable. */
3421 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3422 {
3423 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3424 {
3425 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3426 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3427 }
3428 }
3429
3430 /* Is it there? */
3431 if (!Idte.Gate.u1Present)
3432 {
3433 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3434 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3435 }
3436
3437 /* A null CS is bad. */
3438 RTSEL NewCS = Idte.Gate.u16Sel;
3439 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3440 {
3441 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3442 return iemRaiseGeneralProtectionFault0(pVCpu);
3443 }
3444
3445 /* Fetch the descriptor for the new CS. */
3446 IEMSELDESC DescCS;
3447 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3448 if (rcStrict != VINF_SUCCESS)
3449 {
3450 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3451 return rcStrict;
3452 }
3453
3454 /* Must be a 64-bit code segment. */
3455 if (!DescCS.Long.Gen.u1DescType)
3456 {
3457 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3458 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3459 }
3460 if ( !DescCS.Long.Gen.u1Long
3461 || DescCS.Long.Gen.u1DefBig
3462 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3463 {
3464 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3465 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3466 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3467 }
3468
3469 /* Don't allow lowering the privilege level. For non-conforming CS
3470 selectors, the CS.DPL sets the privilege level the trap/interrupt
3471 handler runs at. For conforming CS selectors, the CPL remains
3472 unchanged, but the CS.DPL must be <= CPL. */
3473 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3474 * when CPU in Ring-0. Result \#GP? */
3475 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3476 {
3477 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3478 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3479 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3480 }
3481
3482
3483 /* Make sure the selector is present. */
3484 if (!DescCS.Legacy.Gen.u1Present)
3485 {
3486 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3487 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3488 }
3489
3490 /* Check that the new RIP is canonical. */
3491 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3492 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3493 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3494 if (!IEM_IS_CANONICAL(uNewRip))
3495 {
3496 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3497 return iemRaiseGeneralProtectionFault0(pVCpu);
3498 }
3499
3500 /*
3501 * If the privilege level changes or if the IST isn't zero, we need to get
3502 * a new stack from the TSS.
3503 */
3504 uint64_t uNewRsp;
3505 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3506 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3507 if ( uNewCpl != pVCpu->iem.s.uCpl
3508 || Idte.Gate.u3IST != 0)
3509 {
3510 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3511 if (rcStrict != VINF_SUCCESS)
3512 return rcStrict;
3513 }
3514 else
3515 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3516 uNewRsp &= ~(uint64_t)0xf;
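/* Note: in 64-bit mode the CPU aligns the new RSP on a 16-byte boundary before
   pushing the interrupt stack frame. */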
3517
3518 /*
3519 * Calc the flag image to push.
3520 */
3521 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3522 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3523 fEfl &= ~X86_EFL_RF;
3524 else
3525 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3526
3527 /*
3528 * Start making changes.
3529 */
3530 /* Set the new CPL so that stack accesses use it. */
3531 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3532 pVCpu->iem.s.uCpl = uNewCpl;
3533
3534 /* Create the stack frame. */
3535 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
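/* Note: the 64-bit frame always contains RIP, CS, RFLAGS, RSP and SS (5 quadwords),
   plus the error code when applicable. */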
3536 RTPTRUNION uStackFrame;
3537 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3538 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3539 if (rcStrict != VINF_SUCCESS)
3540 return rcStrict;
3541 void * const pvStackFrame = uStackFrame.pv;
3542
3543 if (fFlags & IEM_XCPT_FLAGS_ERR)
3544 *uStackFrame.pu64++ = uErr;
3545 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3546 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3547 uStackFrame.pu64[2] = fEfl;
3548 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3549 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3550 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3551 if (rcStrict != VINF_SUCCESS)
3552 return rcStrict;
3553
3554 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3555 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3556 * after pushing the stack frame? (Write protect the gdt + stack to
3557 * find out.) */
3558 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3559 {
3560 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3561 if (rcStrict != VINF_SUCCESS)
3562 return rcStrict;
3563 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3564 }
3565
3566 /*
3567 * Start committing the register changes.
3568 */
3569 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3570 * hidden registers when interrupting 32-bit or 16-bit code! */
3571 if (uNewCpl != uOldCpl)
3572 {
3573 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3574 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3575 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3576 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3577 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3578 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3579 }
3580 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3581 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3582 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3583 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3584 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3585 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3586 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3587 pVCpu->cpum.GstCtx.rip = uNewRip;
3588
3589 fEfl &= ~fEflToClear;
3590 IEMMISC_SET_EFL(pVCpu, fEfl);
3591
3592 if (fFlags & IEM_XCPT_FLAGS_CR2)
3593 pVCpu->cpum.GstCtx.cr2 = uCr2;
3594
3595 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3596 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3597
3598 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3599}
3600
3601
3602/**
3603 * Implements exceptions and interrupts.
3604 *
3605 * All exceptions and interrupts go through this function!
3606 *
3607 * @returns VBox strict status code.
3608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3609 * @param cbInstr The number of bytes to offset rIP by in the return
3610 * address.
3611 * @param u8Vector The interrupt / exception vector number.
3612 * @param fFlags The flags.
3613 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3614 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3615 */
3616VBOXSTRICTRC
3617iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3618 uint8_t cbInstr,
3619 uint8_t u8Vector,
3620 uint32_t fFlags,
3621 uint16_t uErr,
3622 uint64_t uCr2) RT_NOEXCEPT
3623{
3624 /*
3625 * Get all the state that we might need here.
3626 */
3627 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3628 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3629
3630#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3631 /*
3632 * Flush prefetch buffer
3633 */
3634 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3635#endif
3636
3637 /*
3638 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3639 */
3640 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3641 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3642 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3643 | IEM_XCPT_FLAGS_BP_INSTR
3644 | IEM_XCPT_FLAGS_ICEBP_INSTR
3645 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3646 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3647 {
3648 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3649 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3650 u8Vector = X86_XCPT_GP;
3651 uErr = 0;
3652 }
3653#ifdef DBGFTRACE_ENABLED
3654 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3655 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3656 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3657#endif
3658
3659 /*
3660 * Evaluate whether NMI blocking should be in effect.
3661 * Normally, NMI blocking is in effect whenever we inject an NMI.
3662 */
3663 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3664 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3665
3666#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3667 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3668 {
3669 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3670 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3671 return rcStrict0;
3672
3673 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3674 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3675 {
3676 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3677 fBlockNmi = false;
3678 }
3679 }
3680#endif
3681
3682#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3683 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3684 {
3685 /*
3686 * If the event is being injected as part of VMRUN, it isn't subject to event
3687 * intercepts in the nested-guest. However, secondary exceptions that occur
3688 * during injection of any event -are- subject to exception intercepts.
3689 *
3690 * See AMD spec. 15.20 "Event Injection".
3691 */
3692 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3693 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3694 else
3695 {
3696 /*
3697 * Check and handle if the event being raised is intercepted.
3698 */
3699 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3700 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3701 return rcStrict0;
3702 }
3703 }
3704#endif
3705
3706 /*
3707 * Set NMI blocking if necessary.
3708 */
3709 if (fBlockNmi)
3710 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3711
3712 /*
3713 * Do recursion accounting.
3714 */
3715 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3716 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3717 if (pVCpu->iem.s.cXcptRecursions == 0)
3718 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3719 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3720 else
3721 {
3722 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3723 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3724 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3725
3726 if (pVCpu->iem.s.cXcptRecursions >= 4)
3727 {
3728#ifdef DEBUG_bird
3729 AssertFailed();
3730#endif
3731 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3732 }
3733
3734 /*
3735 * Evaluate the sequence of recurring events.
3736 */
3737 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3738 NULL /* pXcptRaiseInfo */);
3739 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3740 { /* likely */ }
3741 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3742 {
3743 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3744 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3745 u8Vector = X86_XCPT_DF;
3746 uErr = 0;
3747#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3748 /* VMX nested-guest #DF intercept needs to be checked here. */
3749 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3750 {
3751 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3752 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3753 return rcStrict0;
3754 }
3755#endif
3756 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3757 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3758 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3759 }
3760 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3761 {
3762 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3763 return iemInitiateCpuShutdown(pVCpu);
3764 }
3765 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3766 {
3767 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3768 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3769 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3770 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3771 return VERR_EM_GUEST_CPU_HANG;
3772 }
3773 else
3774 {
3775 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3776 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3777 return VERR_IEM_IPE_9;
3778 }
3779
3780 /*
3781 * The 'EXT' bit is set when an exception occurs during delivery of an external
3782 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3783 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3784 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
3785 *
3786 * [1] - Intel spec. 6.13 "Error Code"
3787 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3788 * [3] - Intel Instruction reference for INT n.
3789 */
3790 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3791 && (fFlags & IEM_XCPT_FLAGS_ERR)
3792 && u8Vector != X86_XCPT_PF
3793 && u8Vector != X86_XCPT_DF)
3794 {
3795 uErr |= X86_TRAP_ERR_EXTERNAL;
3796 }
3797 }
3798
3799 pVCpu->iem.s.cXcptRecursions++;
3800 pVCpu->iem.s.uCurXcpt = u8Vector;
3801 pVCpu->iem.s.fCurXcpt = fFlags;
3802 pVCpu->iem.s.uCurXcptErr = uErr;
3803 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3804
3805 /*
3806 * Extensive logging.
3807 */
3808#if defined(LOG_ENABLED) && defined(IN_RING3)
3809 if (LogIs3Enabled())
3810 {
3811 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3812 PVM pVM = pVCpu->CTX_SUFF(pVM);
3813 char szRegs[4096];
3814 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3815 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3816 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3817 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3818 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3819 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3820 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3821 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3822 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3823 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3824 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3825 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3826 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3827 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3828 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3829 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3830 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3831 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3832 " efer=%016VR{efer}\n"
3833 " pat=%016VR{pat}\n"
3834 " sf_mask=%016VR{sf_mask}\n"
3835 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3836 " lstar=%016VR{lstar}\n"
3837 " star=%016VR{star} cstar=%016VR{cstar}\n"
3838 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3839 );
3840
3841 char szInstr[256];
3842 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3843 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3844 szInstr, sizeof(szInstr), NULL);
3845 Log3(("%s%s\n", szRegs, szInstr));
3846 }
3847#endif /* LOG_ENABLED */
3848
3849 /*
3850 * Stats.
3851 */
3852 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3853 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
3854 else if (u8Vector <= X86_XCPT_LAST)
3855 {
3856 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
3857 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
3858 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
3859 }
3860
3861 /*
3862 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
3863 * to ensure that a stale TLB or paging cache entry will only cause one
3864 * spurious #PF.
3865 */
3866 if ( u8Vector == X86_XCPT_PF
3867 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
3868 IEMTlbInvalidatePage(pVCpu, uCr2);
3869
3870 /*
3871 * Call the mode specific worker function.
3872 */
3873 VBOXSTRICTRC rcStrict;
3874 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3875 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3876 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3877 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3878 else
3879 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3880
3881 /* Flush the prefetch buffer. */
3882#ifdef IEM_WITH_CODE_TLB
3883 pVCpu->iem.s.pbInstrBuf = NULL;
3884#else
3885 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3886#endif
3887
3888 /*
3889 * Unwind.
3890 */
3891 pVCpu->iem.s.cXcptRecursions--;
3892 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3893 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3894 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3895 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3896 pVCpu->iem.s.cXcptRecursions + 1));
3897 return rcStrict;
3898}
3899
3900#ifdef IEM_WITH_SETJMP
3901/**
3902 * See iemRaiseXcptOrInt. Will not return.
3903 */
3904DECL_NO_RETURN(void)
3905iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3906 uint8_t cbInstr,
3907 uint8_t u8Vector,
3908 uint32_t fFlags,
3909 uint16_t uErr,
3910 uint64_t uCr2) RT_NOEXCEPT
3911{
3912 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3913 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3914}
3915#endif
3916
3917
3918/** \#DE - 00. */
3919VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3920{
3921 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3922}
3923
3924
3925/** \#DB - 01.
3926 * @note This automatically clears DR7.GD. */
3927VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3928{
3929 /** @todo set/clear RF. */
3930 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3931 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3932}
3933
3934
3935/** \#BR - 05. */
3936VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3937{
3938 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3939}
3940
3941
3942/** \#UD - 06. */
3943VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3944{
3945 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3946}
3947
3948
3949/** \#NM - 07. */
3950VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3951{
3952 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3953}
3954
3955
3956/** \#TS(err) - 0a. */
3957VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3958{
3959 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3960}
3961
3962
3963/** \#TS(tr) - 0a. */
3964VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3965{
3966 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3967 pVCpu->cpum.GstCtx.tr.Sel, 0);
3968}
3969
3970
3971/** \#TS(0) - 0a. */
3972VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3973{
3974 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3975 0, 0);
3976}
3977
3978
3979/** \#TS(sel) - 0a. */
3980VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3981{
3982 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3983 uSel & X86_SEL_MASK_OFF_RPL, 0);
3984}
3985
3986
3987/** \#NP(err) - 0b. */
3988VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3989{
3990 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3991}
3992
3993
3994/** \#NP(sel) - 0b. */
3995VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3996{
3997 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3998 uSel & ~X86_SEL_RPL, 0);
3999}
4000
4001
4002/** \#SS(seg) - 0c. */
4003VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4004{
4005 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4006 uSel & ~X86_SEL_RPL, 0);
4007}
4008
4009
4010/** \#SS(err) - 0c. */
4011VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4012{
4013 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4014}
4015
4016
4017/** \#GP(n) - 0d. */
4018VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4019{
4020 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4021}
4022
4023
4024/** \#GP(0) - 0d. */
4025VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4026{
4027 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4028}
4029
4030#ifdef IEM_WITH_SETJMP
4031/** \#GP(0) - 0d. */
4032DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4033{
4034 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4035}
4036#endif
4037
4038
4039/** \#GP(sel) - 0d. */
4040VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4041{
4042 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4043 Sel & ~X86_SEL_RPL, 0);
4044}
4045
4046
4047/** \#GP(0) - 0d. */
4048VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4049{
4050 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4051}
4052
4053
4054/** \#GP(sel) - 0d. */
4055VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4056{
4057 NOREF(iSegReg); NOREF(fAccess);
4058 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4059 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4060}
4061
4062#ifdef IEM_WITH_SETJMP
4063/** \#GP(sel) - 0d, longjmp. */
4064DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4065{
4066 NOREF(iSegReg); NOREF(fAccess);
4067 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4068 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4069}
4070#endif
4071
4072/** \#GP(sel) - 0d. */
4073VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4074{
4075 NOREF(Sel);
4076 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4077}
4078
4079#ifdef IEM_WITH_SETJMP
4080/** \#GP(sel) - 0d, longjmp. */
4081DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4082{
4083 NOREF(Sel);
4084 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4085}
4086#endif
4087
4088
4089/** \#GP(sel) - 0d. */
4090VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4091{
4092 NOREF(iSegReg); NOREF(fAccess);
4093 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4094}
4095
4096#ifdef IEM_WITH_SETJMP
4097/** \#GP(sel) - 0d, longjmp. */
4098DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4099{
4100 NOREF(iSegReg); NOREF(fAccess);
4101 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4102}
4103#endif
4104
4105
4106/** \#PF(n) - 0e. */
4107VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4108{
4109 uint16_t uErr;
4110 switch (rc)
4111 {
4112 case VERR_PAGE_NOT_PRESENT:
4113 case VERR_PAGE_TABLE_NOT_PRESENT:
4114 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4115 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4116 uErr = 0;
4117 break;
4118
4119 default:
4120 AssertMsgFailed(("%Rrc\n", rc));
4121 RT_FALL_THRU();
4122 case VERR_ACCESS_DENIED:
4123 uErr = X86_TRAP_PF_P;
4124 break;
4125
4126 /** @todo reserved */
4127 }
4128
4129 if (pVCpu->iem.s.uCpl == 3)
4130 uErr |= X86_TRAP_PF_US;
4131
4132 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4133 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4134 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4135 uErr |= X86_TRAP_PF_ID;
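/* Note: the I/D bit is only reported for instruction fetches, and only when
   no-execute paging (PAE paging with EFER.NXE) is enabled as checked above. */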
4136
4137#if 0 /* This is so much non-sense, really. Why was it done like that? */
4138 /* Note! RW access callers reporting a WRITE protection fault, will clear
4139 the READ flag before calling. So, read-modify-write accesses (RW)
4140 can safely be reported as READ faults. */
4141 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4142 uErr |= X86_TRAP_PF_RW;
4143#else
4144 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4145 {
4146 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4147 /// (regardless of outcome of the comparison in the latter case).
4148 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4149 uErr |= X86_TRAP_PF_RW;
4150 }
4151#endif
4152
4153 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4154 uErr, GCPtrWhere);
4155}
4156
4157#ifdef IEM_WITH_SETJMP
4158/** \#PF(n) - 0e, longjmp. */
4159DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4160{
4161 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4162}
4163#endif
4164
4165
4166/** \#MF(0) - 10. */
4167VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4168{
4169 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4170 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4171
4172 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4173 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4174 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4175}
4176
4177
4178/** \#AC(0) - 11. */
4179VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4180{
4181 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4182}
4183
4184#ifdef IEM_WITH_SETJMP
4185/** \#AC(0) - 11, longjmp. */
4186DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4187{
4188 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4189}
4190#endif
4191
4192
4193/** \#XF(0)/\#XM(0) - 19. */
4194VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4195{
4196 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4197}
4198
4199
4200/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4201IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4202{
4203 NOREF(cbInstr);
4204 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4205}
4206
4207
4208/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4209IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4210{
4211 NOREF(cbInstr);
4212 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4213}
4214
4215
4216/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4217IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4218{
4219 NOREF(cbInstr);
4220 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4221}
4222
4223
4224/** @} */
4225
4226/** @name Common opcode decoders.
4227 * @{
4228 */
4229//#include <iprt/mem.h>
4230
4231/**
4232 * Used to add extra details about a stub case.
4233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4234 */
4235void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4236{
4237#if defined(LOG_ENABLED) && defined(IN_RING3)
4238 PVM pVM = pVCpu->CTX_SUFF(pVM);
4239 char szRegs[4096];
4240 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4241 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4242 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4243 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4244 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4245 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4246 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4247 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4248 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4249 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4250 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4251 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4252 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4253 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4254 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4255 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4256 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4257 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4258 " efer=%016VR{efer}\n"
4259 " pat=%016VR{pat}\n"
4260 " sf_mask=%016VR{sf_mask}\n"
4261 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4262 " lstar=%016VR{lstar}\n"
4263 " star=%016VR{star} cstar=%016VR{cstar}\n"
4264 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4265 );
4266
4267 char szInstr[256];
4268 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4269 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4270 szInstr, sizeof(szInstr), NULL);
4271
4272 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4273#else
4274 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
4275#endif
4276}
4277
4278/** @} */
4279
4280
4281
4282/** @name Register Access.
4283 * @{
4284 */
4285
4286/**
4287 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4288 *
4289 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4290 * segment limit.
4291 *
4292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4293 * @param offNextInstr The offset of the next instruction.
4294 */
4295VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr) RT_NOEXCEPT
4296{
4297 switch (pVCpu->iem.s.enmEffOpSize)
4298 {
4299 case IEMMODE_16BIT:
4300 {
4301 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4302 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4303 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4304 return iemRaiseGeneralProtectionFault0(pVCpu);
4305 pVCpu->cpum.GstCtx.rip = uNewIp;
4306 break;
4307 }
4308
4309 case IEMMODE_32BIT:
4310 {
4311 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4312 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4313
4314 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4315 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4316 return iemRaiseGeneralProtectionFault0(pVCpu);
4317 pVCpu->cpum.GstCtx.rip = uNewEip;
4318 break;
4319 }
4320
4321 case IEMMODE_64BIT:
4322 {
4323 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4324
4325 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4326 if (!IEM_IS_CANONICAL(uNewRip))
4327 return iemRaiseGeneralProtectionFault0(pVCpu);
4328 pVCpu->cpum.GstCtx.rip = uNewRip;
4329 break;
4330 }
4331
4332 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4333 }
4334
4335 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4336
4337#ifndef IEM_WITH_CODE_TLB
4338 /* Flush the prefetch buffer. */
4339 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4340#endif
4341
4342 return VINF_SUCCESS;
4343}
4344
4345
4346/**
4347 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4348 *
4349 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4350 * segment limit.
4351 *
4352 * @returns Strict VBox status code.
4353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4354 * @param offNextInstr The offset of the next instruction.
4355 */
4356VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr) RT_NOEXCEPT
4357{
4358 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4359
4360 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4361 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4362 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4363 return iemRaiseGeneralProtectionFault0(pVCpu);
4364 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4365 pVCpu->cpum.GstCtx.rip = uNewIp;
4366 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4367
4368#ifndef IEM_WITH_CODE_TLB
4369 /* Flush the prefetch buffer. */
4370 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4371#endif
4372
4373 return VINF_SUCCESS;
4374}
4375
4376
4377/**
4378 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4379 *
4380 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4381 * segment limit.
4382 *
4383 * @returns Strict VBox status code.
4384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4385 * @param offNextInstr The offset of the next instruction.
4386 */
4387VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr) RT_NOEXCEPT
4388{
4389 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
4390
4391 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
4392 {
4393 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4394
4395 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4396 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4397 return iemRaiseGeneralProtectionFault0(pVCpu);
4398 pVCpu->cpum.GstCtx.rip = uNewEip;
4399 }
4400 else
4401 {
4402 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4403
4404 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4405 if (!IEM_IS_CANONICAL(uNewRip))
4406 return iemRaiseGeneralProtectionFault0(pVCpu);
4407 pVCpu->cpum.GstCtx.rip = uNewRip;
4408 }
4409 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4410
4411#ifndef IEM_WITH_CODE_TLB
4412 /* Flush the prefetch buffer. */
4413 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4414#endif
4415
4416 return VINF_SUCCESS;
4417}
4418
4419
4420/**
4421 * Performs a near jump to the specified address.
4422 *
4423 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4424 * segment limit.
4425 *
4426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4427 * @param uNewRip The new RIP value.
4428 */
4429VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4430{
4431 switch (pVCpu->iem.s.enmEffOpSize)
4432 {
4433 case IEMMODE_16BIT:
4434 {
4435 Assert(uNewRip <= UINT16_MAX);
4436 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
4437 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4438 return iemRaiseGeneralProtectionFault0(pVCpu);
4439 /** @todo Test 16-bit jump in 64-bit mode. */
4440 pVCpu->cpum.GstCtx.rip = uNewRip;
4441 break;
4442 }
4443
4444 case IEMMODE_32BIT:
4445 {
4446 Assert(uNewRip <= UINT32_MAX);
4447 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4448 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4449
4450 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
4451 return iemRaiseGeneralProtectionFault0(pVCpu);
4452 pVCpu->cpum.GstCtx.rip = uNewRip;
4453 break;
4454 }
4455
4456 case IEMMODE_64BIT:
4457 {
4458 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4459
4460 if (!IEM_IS_CANONICAL(uNewRip))
4461 return iemRaiseGeneralProtectionFault0(pVCpu);
4462 pVCpu->cpum.GstCtx.rip = uNewRip;
4463 break;
4464 }
4465
4466 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4467 }
4468
4469 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4470
4471#ifndef IEM_WITH_CODE_TLB
4472 /* Flush the prefetch buffer. */
4473 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4474#endif
4475
4476 return VINF_SUCCESS;
4477}
4478
4479/** @} */
4480
4481
4482/** @name FPU access and helpers.
4483 *
4484 * @{
4485 */
4486
4487/**
4488 * Updates the x87.DS and FPUDP registers.
4489 *
4490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4491 * @param pFpuCtx The FPU context.
4492 * @param iEffSeg The effective segment register.
4493 * @param GCPtrEff The effective address relative to @a iEffSeg.
4494 */
4495DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4496{
4497 RTSEL sel;
4498 switch (iEffSeg)
4499 {
4500 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4501 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4502 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4503 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4504 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4505 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4506 default:
4507 AssertMsgFailed(("%d\n", iEffSeg));
4508 sel = pVCpu->cpum.GstCtx.ds.Sel;
4509 }
4510 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4511 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4512 {
4513 pFpuCtx->DS = 0;
4514 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4515 }
4516 else if (!IEM_IS_LONG_MODE(pVCpu))
4517 {
4518 pFpuCtx->DS = sel;
4519 pFpuCtx->FPUDP = GCPtrEff;
4520 }
4521 else
4522 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4523}
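
/*
 * Illustration of the real/V86-mode FPUDP encoding above (selector and
 * offset values invented for the example): the data pointer is stored as
 * the 16-bit style linear address sel*16 + offset.
 *
 *      uint32_t const uFpuDp = UINT32_C(0x0010) + (UINT32_C(0x1234) << 4);
 *      // 0x12350, i.e. what the code stores for DS=0x1234, GCPtrEff=0x0010
 */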
4524
4525
4526/**
4527 * Rotates the stack registers in the push direction.
4528 *
4529 * @param pFpuCtx The FPU context.
4530 * @remarks This is a complete waste of time, but fxsave stores the registers in
4531 * stack order.
4532 */
4533DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4534{
4535 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4536 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4537 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4538 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4539 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4540 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4541 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4542 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4543 pFpuCtx->aRegs[0].r80 = r80Tmp;
4544}
4545
4546
4547/**
4548 * Rotates the stack registers in the pop direction.
4549 *
4550 * @param pFpuCtx The FPU context.
4551 * @remarks This is a complete waste of time, but fxsave stores the registers in
4552 * stack order.
4553 */
4554DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4555{
4556 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4557 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4558 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4559 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4560 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4561 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4562 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4563 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4564 pFpuCtx->aRegs[7].r80 = r80Tmp;
4565}
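
/*
 * Why the two rotations above exist: aRegs[] mirrors the FXSAVE layout, so
 * entry i holds ST(i) rather than physical register i.  When TOP moves,
 * every ST(i) maps onto a different physical register, hence the whole
 * array is rotated so that aRegs[0] keeps tracking ST(0).  Conceptually
 * (illustration only):
 *
 *      // physical register backing ST(i) for a given TOP value:
 *      //     iPhysReg = (TOP + i) & 7
 */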
4566
4567
4568/**
4569 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4570 * exception prevents it.
4571 *
4572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4573 * @param pResult The FPU operation result to push.
4574 * @param pFpuCtx The FPU context.
4575 */
4576static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4577{
4578 /* Update FSW and bail if there are pending exceptions afterwards. */
4579 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4580 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4581 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4582 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4583 {
4584 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4585 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4586 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4587 pFpuCtx->FSW = fFsw;
4588 return;
4589 }
4590
4591 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4592 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4593 {
4594 /* All is fine, push the actual value. */
4595 pFpuCtx->FTW |= RT_BIT(iNewTop);
4596 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4597 }
4598 else if (pFpuCtx->FCW & X86_FCW_IM)
4599 {
4600 /* Masked stack overflow, push QNaN. */
4601 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4602 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4603 }
4604 else
4605 {
4606 /* Raise stack overflow, don't push anything. */
4607 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4608 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4609 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4610 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4611 return;
4612 }
4613
4614 fFsw &= ~X86_FSW_TOP_MASK;
4615 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4616 pFpuCtx->FSW = fFsw;
4617
4618 iemFpuRotateStackPush(pFpuCtx);
4619 RT_NOREF(pVCpu);
4620}
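
/*
 * The new TOP computed above uses modular arithmetic on the 3-bit TOP
 * field: (TOP + 7) & 7 equals (TOP - 1) mod 8, i.e. a push moves TOP one
 * slot down, and the matching FTW bit tells whether that slot is free.
 * Minimal sketch with plain integers (illustration only):
 *
 *      unsigned uTop          = 3;
 *      unsigned uTopAfterPush = (uTop + 7) & 7;    // 2, i.e. uTop - 1
 *      unsigned uTopAfterPop  = (uTop + 1) & 7;    // 4, i.e. uTop + 1
 */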
4621
4622
4623/**
4624 * Stores a result in a FPU register and updates the FSW and FTW.
4625 *
4626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4627 * @param pFpuCtx The FPU context.
4628 * @param pResult The result to store.
4629 * @param iStReg Which FPU register to store it in.
4630 */
4631static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4632{
4633 Assert(iStReg < 8);
4634 uint16_t fNewFsw = pFpuCtx->FSW;
4635 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4636 fNewFsw &= ~X86_FSW_C_MASK;
4637 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4638 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4639 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4640 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4641 pFpuCtx->FSW = fNewFsw;
4642 pFpuCtx->FTW |= RT_BIT(iReg);
4643 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4644 RT_NOREF(pVCpu);
4645}
4646
4647
4648/**
4649 * Only updates the FPU status word (FSW) with the result of the current
4650 * instruction.
4651 *
4652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4653 * @param pFpuCtx The FPU context.
4654 * @param u16FSW The FSW output of the current instruction.
4655 */
4656static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4657{
4658 uint16_t fNewFsw = pFpuCtx->FSW;
4659 fNewFsw &= ~X86_FSW_C_MASK;
4660 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4661 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4662 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4663 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4664 pFpuCtx->FSW = fNewFsw;
4665 RT_NOREF(pVCpu);
4666}
4667
4668
4669/**
4670 * Pops one item off the FPU stack if no pending exception prevents it.
4671 *
4672 * @param pFpuCtx The FPU context.
4673 */
4674static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4675{
4676 /* Check pending exceptions. */
4677 uint16_t uFSW = pFpuCtx->FSW;
4678 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4679 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4680 return;
4681
4682 /* TOP--. */
4683 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4684 uFSW &= ~X86_FSW_TOP_MASK;
4685 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4686 pFpuCtx->FSW = uFSW;
4687
4688 /* Mark the previous ST0 as empty. */
4689 iOldTop >>= X86_FSW_TOP_SHIFT;
4690 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4691
4692 /* Rotate the registers. */
4693 iemFpuRotateStackPop(pFpuCtx);
4694}
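
/*
 * The TOP increment above is done directly inside the FSW TOP field; since
 * only the X86_FSW_TOP_MASK bits are kept, adding 9 (or any value that is
 * 1 modulo 8) in that field is the same as TOP = (TOP + 1) & 7.  Sketch
 * with explicit numbers (illustration only):
 *
 *      uint16_t const fsw  = 7 << X86_FSW_TOP_SHIFT;                                        // TOP = 7
 *      uint16_t const uNew = (fsw + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
 *      // uNew >> X86_FSW_TOP_SHIFT == 0, i.e. (7 + 1) & 7
 */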
4695
4696
4697/**
4698 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4699 *
4700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4701 * @param pResult The FPU operation result to push.
4702 */
4703void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4704{
4705 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4706 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4707 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4708}
4709
4710
4711/**
4712 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4713 * and sets FPUDP and FPUDS.
4714 *
4715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4716 * @param pResult The FPU operation result to push.
4717 * @param iEffSeg The effective segment register.
4718 * @param GCPtrEff The effective address relative to @a iEffSeg.
4719 */
4720void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4721{
4722 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4723 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4724 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4725 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4726}
4727
4728
4729/**
4730 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4731 * unless a pending exception prevents it.
4732 *
4733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4734 * @param pResult The FPU operation result to store and push.
4735 */
4736void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4737{
4738 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4739 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4740
4741 /* Update FSW and bail if there are pending exceptions afterwards. */
4742 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4743 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4744 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4745 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4746 {
4747 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4748 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4749 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4750 pFpuCtx->FSW = fFsw;
4751 return;
4752 }
4753
4754 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4755 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4756 {
4757 /* All is fine, push the actual value. */
4758 pFpuCtx->FTW |= RT_BIT(iNewTop);
4759 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4760 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4761 }
4762 else if (pFpuCtx->FCW & X86_FCW_IM)
4763 {
4764 /* Masked stack overflow, push QNaN. */
4765 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4766 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4767 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4768 }
4769 else
4770 {
4771 /* Raise stack overflow, don't push anything. */
4772 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4773 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4774 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4775 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4776 return;
4777 }
4778
4779 fFsw &= ~X86_FSW_TOP_MASK;
4780 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4781 pFpuCtx->FSW = fFsw;
4782
4783 iemFpuRotateStackPush(pFpuCtx);
4784}
4785
4786
4787/**
4788 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4789 * FOP.
4790 *
4791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4792 * @param pResult The result to store.
4793 * @param iStReg Which FPU register to store it in.
4794 */
4795void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4796{
4797 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4798 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4799 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4800}
4801
4802
4803/**
4804 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4805 * FOP, and then pops the stack.
4806 *
4807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4808 * @param pResult The result to store.
4809 * @param iStReg Which FPU register to store it in.
4810 */
4811void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4812{
4813 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4814 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4815 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4816 iemFpuMaybePopOne(pFpuCtx);
4817}
4818
4819
4820/**
4821 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4822 * FPUDP, and FPUDS.
4823 *
4824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4825 * @param pResult The result to store.
4826 * @param iStReg Which FPU register to store it in.
4827 * @param iEffSeg The effective memory operand selector register.
4828 * @param GCPtrEff The effective memory operand offset.
4829 */
4830void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4831 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4832{
4833 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4834 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4835 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4836 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4837}
4838
4839
4840/**
4841 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4842 * FPUDP, and FPUDS, and then pops the stack.
4843 *
4844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4845 * @param pResult The result to store.
4846 * @param iStReg Which FPU register to store it in.
4847 * @param iEffSeg The effective memory operand selector register.
4848 * @param GCPtrEff The effective memory operand offset.
4849 */
4850void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4851 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4852{
4853 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4854 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4855 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4856 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4857 iemFpuMaybePopOne(pFpuCtx);
4858}
4859
4860
4861/**
4862 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4863 *
4864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4865 */
4866void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4867{
4868 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4869 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4870}
4871
4872
4873/**
4874 * Updates the FSW, FOP, FPUIP, and FPUCS.
4875 *
4876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4877 * @param u16FSW The FSW from the current instruction.
4878 */
4879void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4880{
4881 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4882 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4883 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4884}
4885
4886
4887/**
4888 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4889 *
4890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4891 * @param u16FSW The FSW from the current instruction.
4892 */
4893void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4894{
4895 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4896 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4897 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4898 iemFpuMaybePopOne(pFpuCtx);
4899}
4900
4901
4902/**
4903 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4904 *
4905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4906 * @param u16FSW The FSW from the current instruction.
4907 * @param iEffSeg The effective memory operand selector register.
4908 * @param GCPtrEff The effective memory operand offset.
4909 */
4910void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4911{
4912 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4913 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4914 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4915 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4916}
4917
4918
4919/**
4920 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4921 *
4922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4923 * @param u16FSW The FSW from the current instruction.
4924 */
4925void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4926{
4927 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4928 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4929 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4930 iemFpuMaybePopOne(pFpuCtx);
4931 iemFpuMaybePopOne(pFpuCtx);
4932}
4933
4934
4935/**
4936 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4937 *
4938 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4939 * @param u16FSW The FSW from the current instruction.
4940 * @param iEffSeg The effective memory operand selector register.
4941 * @param GCPtrEff The effective memory operand offset.
4942 */
4943void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4944{
4945 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4946 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4947 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4948 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4949 iemFpuMaybePopOne(pFpuCtx);
4950}
4951
4952
4953/**
4954 * Worker routine for raising an FPU stack underflow exception.
4955 *
4956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4957 * @param pFpuCtx The FPU context.
4958 * @param iStReg The stack register being accessed.
4959 */
4960static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
4961{
4962 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4963 if (pFpuCtx->FCW & X86_FCW_IM)
4964 {
4965 /* Masked underflow. */
4966 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4967 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4968 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4969 if (iStReg != UINT8_MAX)
4970 {
4971 pFpuCtx->FTW |= RT_BIT(iReg);
4972 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
4973 }
4974 }
4975 else
4976 {
4977 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4978 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4979 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
4980 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4981 }
4982 RT_NOREF(pVCpu);
4983}
4984
4985
4986/**
4987 * Raises a FPU stack underflow exception.
4988 *
4989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4990 * @param iStReg The destination register that should be loaded
4991 * with QNaN if \#IS is not masked. Specify
4992 * UINT8_MAX if none (like for fcom).
4993 */
4994void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4995{
4996 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4997 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4998 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
4999}
5000
5001
5002void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5003{
5004 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5005 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5006 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5007 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5008}
5009
5010
5011void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5012{
5013 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5014 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5015 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5016 iemFpuMaybePopOne(pFpuCtx);
5017}
5018
5019
5020void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5021{
5022 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5023 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5024 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5025 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5026 iemFpuMaybePopOne(pFpuCtx);
5027}
5028
5029
5030void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5031{
5032 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5033 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5034 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5035 iemFpuMaybePopOne(pFpuCtx);
5036 iemFpuMaybePopOne(pFpuCtx);
5037}
5038
5039
5040void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5041{
5042 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5043 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5044
5045 if (pFpuCtx->FCW & X86_FCW_IM)
5046 {
5047 /* Masked underflow - Push QNaN. */
5048 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5049 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5050 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5051 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5052 pFpuCtx->FTW |= RT_BIT(iNewTop);
5053 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5054 iemFpuRotateStackPush(pFpuCtx);
5055 }
5056 else
5057 {
5058 /* Exception pending - don't change TOP or the register stack. */
5059 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5060 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5061 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5062 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5063 }
5064}
5065
5066
5067void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5068{
5069 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5070 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5071
5072 if (pFpuCtx->FCW & X86_FCW_IM)
5073 {
5074 /* Masked underflow - Push QNaN. */
5075 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5076 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5077 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5078 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5079 pFpuCtx->FTW |= RT_BIT(iNewTop);
5080 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5081 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5082 iemFpuRotateStackPush(pFpuCtx);
5083 }
5084 else
5085 {
5086 /* Exception pending - don't change TOP or the register stack. */
5087 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5088 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5089 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5090 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5091 }
5092}
5093
5094
5095/**
5096 * Worker routine for raising an FPU stack overflow exception on a push.
5097 *
5098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5099 * @param pFpuCtx The FPU context.
5100 */
5101static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5102{
5103 if (pFpuCtx->FCW & X86_FCW_IM)
5104 {
5105 /* Masked overflow. */
5106 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5107 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5108 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5109 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5110 pFpuCtx->FTW |= RT_BIT(iNewTop);
5111 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5112 iemFpuRotateStackPush(pFpuCtx);
5113 }
5114 else
5115 {
5116 /* Exception pending - don't change TOP or the register stack. */
5117 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5118 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5119 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5120 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5121 }
5122 RT_NOREF(pVCpu);
5123}
5124
5125
5126/**
5127 * Raises a FPU stack overflow exception on a push.
5128 *
5129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5130 */
5131void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5132{
5133 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5134 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5135 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5136}
5137
5138
5139/**
5140 * Raises a FPU stack overflow exception on a push with a memory operand.
5141 *
5142 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5143 * @param iEffSeg The effective memory operand selector register.
5144 * @param GCPtrEff The effective memory operand offset.
5145 */
5146void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5147{
5148 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5149 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5150 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5151 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5152}
5153
5154/** @} */
5155
5156
5157/** @name SSE+AVX SIMD access and helpers.
5158 *
5159 * @{
5160 */
5161/**
5162 * Stores a result in a SIMD XMM register, updates the MXCSR.
5163 *
5164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5165 * @param pResult The result to store.
5166 * @param iXmmReg Which SIMD XMM register to store the result in.
5167 */
5168void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5169{
5170 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5171 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5172
5173 /* The result is only updated if there is no unmasked exception pending. */
5174 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5175 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5176 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5177}
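
/*
 * The commit check above aligns the exception mask bits (MXCSR[12:7]) with
 * the exception flag bits (MXCSR[5:0]) and only writes the XMM register
 * when every flagged exception is masked.  Small sketch with made-up MXCSR
 * contents (illustration only):
 *
 *      uint32_t const fMxcsr  = X86_MXCSR_ZM | X86_MXCSR_ZE;  // #Z masked and raised
 *      bool const     fCommit = (  ~((fMxcsr & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
 *                                &  (fMxcsr & X86_MXCSR_XCPT_FLAGS)) == 0;  // true
 */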
5178
5179
5180/**
5181 * Updates the MXCSR.
5182 *
5183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5184 * @param fMxcsr The new MXCSR value.
5185 */
5186void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5187{
5188 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5189 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5190}
5191/** @} */
5192
5193
5194/** @name Memory access.
5195 *
5196 * @{
5197 */
5198
5199
5200/**
5201 * Updates the IEMCPU::cbWritten counter if applicable.
5202 *
5203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5204 * @param fAccess The access being accounted for.
5205 * @param cbMem The access size.
5206 */
5207DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5208{
5209 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5210 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5211 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5212}
5213
5214
5215/**
5216 * Applies the segment limit, base and attributes.
5217 *
5218 * This may raise a \#GP or \#SS.
5219 *
5220 * @returns VBox strict status code.
5221 *
5222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5223 * @param fAccess The kind of access which is being performed.
5224 * @param iSegReg The index of the segment register to apply.
5225 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5226 * TSS, ++).
5227 * @param cbMem The access size.
5228 * @param pGCPtrMem Pointer to the guest memory address to apply
5229 * segmentation to. Input and output parameter.
5230 */
5231VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5232{
5233 if (iSegReg == UINT8_MAX)
5234 return VINF_SUCCESS;
5235
5236 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5237 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5238 switch (pVCpu->iem.s.enmCpuMode)
5239 {
5240 case IEMMODE_16BIT:
5241 case IEMMODE_32BIT:
5242 {
5243 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5244 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5245
5246 if ( pSel->Attr.n.u1Present
5247 && !pSel->Attr.n.u1Unusable)
5248 {
5249 Assert(pSel->Attr.n.u1DescType);
5250 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5251 {
5252 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5253 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5254 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5255
5256 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5257 {
5258 /** @todo CPL check. */
5259 }
5260
5261 /*
5262 * There are two kinds of data selectors, normal and expand down.
5263 */
5264 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5265 {
5266 if ( GCPtrFirst32 > pSel->u32Limit
5267 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5268 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5269 }
5270 else
5271 {
5272 /*
5273 * The upper boundary is defined by the B bit, not the G bit!
5274 */
5275 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5276 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5277 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5278 }
5279 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5280 }
5281 else
5282 {
5283 /*
5284 * A code selector can usually be used to read through it; writing is
5285 * only permitted in real and V8086 mode.
5286 */
5287 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5288 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5289 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5290 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5291 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5292
5293 if ( GCPtrFirst32 > pSel->u32Limit
5294 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5295 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5296
5297 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5298 {
5299 /** @todo CPL check. */
5300 }
5301
5302 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5303 }
5304 }
5305 else
5306 return iemRaiseGeneralProtectionFault0(pVCpu);
5307 return VINF_SUCCESS;
5308 }
5309
5310 case IEMMODE_64BIT:
5311 {
5312 RTGCPTR GCPtrMem = *pGCPtrMem;
5313 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5314 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5315
5316 Assert(cbMem >= 1);
5317 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5318 return VINF_SUCCESS;
5319 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5320 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5321 return iemRaiseGeneralProtectionFault0(pVCpu);
5322 }
5323
5324 default:
5325 AssertFailedReturn(VERR_IEM_IPE_7);
5326 }
5327}
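
/*
 * Quick illustration of the two 16/32-bit data limit checks above (limit
 * and addresses invented for the example): with u32Limit = 0x4fff, a
 * normal segment requires the last byte to lie below the limit, whereas an
 * expand-down segment requires the opposite range:
 *
 *      // normal:       GCPtrFirst32 <= GCPtrLast32 <= 0x4fff
 *      // expand-down:  GCPtrFirst32 >  0x4fff  &&  GCPtrLast32 <= (DefBig ? 0xffffffff : 0xffff)
 */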
5328
5329
5330/**
5331 * Translates a virtual address to a physical address and checks if we
5332 * can access the page as specified.
5333 *
5334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5335 * @param GCPtrMem The virtual address.
5336 * @param fAccess The intended access.
5337 * @param pGCPhysMem Where to return the physical address.
5338 */
5339VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5340{
5341 /** @todo Need a different PGM interface here. We're currently using
5342 * generic / REM interfaces. This won't cut it for R0. */
5343 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5344 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5345 * here. */
5346 PGMPTWALK Walk;
5347 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5348 if (RT_FAILURE(rc))
5349 {
5350 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5351 /** @todo Check unassigned memory in unpaged mode. */
5352 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5353#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5354 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5355 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5356#endif
5357 *pGCPhysMem = NIL_RTGCPHYS;
5358 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5359 }
5360
5361 /* If the page is writable and does not have the no-exec bit set, all
5362 access is allowed. Otherwise we'll have to check more carefully... */
5363 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5364 {
5365 /* Write to read only memory? */
5366 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5367 && !(Walk.fEffective & X86_PTE_RW)
5368 && ( ( pVCpu->iem.s.uCpl == 3
5369 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5370 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5371 {
5372 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5373 *pGCPhysMem = NIL_RTGCPHYS;
5374#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5375 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5376 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5377#endif
5378 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5379 }
5380
5381 /* Kernel memory accessed by userland? */
5382 if ( !(Walk.fEffective & X86_PTE_US)
5383 && pVCpu->iem.s.uCpl == 3
5384 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5385 {
5386 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5387 *pGCPhysMem = NIL_RTGCPHYS;
5388#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5389 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5390 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5391#endif
5392 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5393 }
5394
5395 /* Executing non-executable memory? */
5396 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5397 && (Walk.fEffective & X86_PTE_PAE_NX)
5398 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5399 {
5400 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5401 *pGCPhysMem = NIL_RTGCPHYS;
5402#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5403 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5404 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5405#endif
5406 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5407 VERR_ACCESS_DENIED);
5408 }
5409 }
5410
5411 /*
5412 * Set the dirty / access flags.
5413 * ASSUMES this is set when the address is translated rather than on commit...
5414 */
5415 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5416 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5417 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5418 {
5419 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5420 AssertRC(rc2);
5421 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5422 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5423 }
5424
5425 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5426 *pGCPhysMem = GCPhys;
5427 return VINF_SUCCESS;
5428}
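
/*
 * The read-only check above is the usual x86 write-protect rule restated:
 * a write to a non-writable page faults for CPL 3 (unless it is an
 * implicit system access), while supervisor writes only fault when CR0.WP
 * is set.  As a boolean sketch (illustration only):
 *
 *      // fault = !PTE.RW && write && ((uCpl == 3 && !fSysAccess) || CR0.WP)
 */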
5429
5430
5431/**
5432 * Looks up a memory mapping entry.
5433 *
5434 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5436 * @param pvMem The memory address.
5437 * @param fAccess The kind of access to match (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX).
5438 */
5439DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5440{
5441 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5442 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5443 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5444 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5445 return 0;
5446 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5447 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5448 return 1;
5449 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5450 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5451 return 2;
5452 return VERR_NOT_FOUND;
5453}
5454
5455
5456/**
5457 * Finds a free memmap entry when using iNextMapping doesn't work.
5458 *
5459 * @returns Memory mapping index, 1024 on failure.
5460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5461 */
5462static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5463{
5464 /*
5465 * The easy case.
5466 */
5467 if (pVCpu->iem.s.cActiveMappings == 0)
5468 {
5469 pVCpu->iem.s.iNextMapping = 1;
5470 return 0;
5471 }
5472
5473 /* There should be enough mappings for all instructions. */
5474 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5475
5476 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5477 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5478 return i;
5479
5480 AssertFailedReturn(1024);
5481}
5482
5483
5484/**
5485 * Commits a bounce buffer that needs writing back and unmaps it.
5486 *
5487 * @returns Strict VBox status code.
5488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5489 * @param iMemMap The index of the buffer to commit.
5490 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5491 * Always false in ring-3, obviously.
5492 */
5493static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5494{
5495 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5496 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5497#ifdef IN_RING3
5498 Assert(!fPostponeFail);
5499 RT_NOREF_PV(fPostponeFail);
5500#endif
5501
5502 /*
5503 * Do the writing.
5504 */
5505 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5506 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5507 {
5508 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5509 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5510 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5511 if (!pVCpu->iem.s.fBypassHandlers)
5512 {
5513 /*
5514 * Carefully and efficiently dealing with access handler return
5515 * codes makes this a little bloated.
5516 */
5517 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5518 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5519 pbBuf,
5520 cbFirst,
5521 PGMACCESSORIGIN_IEM);
5522 if (rcStrict == VINF_SUCCESS)
5523 {
5524 if (cbSecond)
5525 {
5526 rcStrict = PGMPhysWrite(pVM,
5527 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5528 pbBuf + cbFirst,
5529 cbSecond,
5530 PGMACCESSORIGIN_IEM);
5531 if (rcStrict == VINF_SUCCESS)
5532 { /* nothing */ }
5533 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5534 {
5535 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5536 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5537 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5538 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5539 }
5540#ifndef IN_RING3
5541 else if (fPostponeFail)
5542 {
5543 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5544 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5545 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5546 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5547 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5548 return iemSetPassUpStatus(pVCpu, rcStrict);
5549 }
5550#endif
5551 else
5552 {
5553 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5554 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5555 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5556 return rcStrict;
5557 }
5558 }
5559 }
5560 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5561 {
5562 if (!cbSecond)
5563 {
5564 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5565 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5566 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5567 }
5568 else
5569 {
5570 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5571 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5572 pbBuf + cbFirst,
5573 cbSecond,
5574 PGMACCESSORIGIN_IEM);
5575 if (rcStrict2 == VINF_SUCCESS)
5576 {
5577 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5578 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5579 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5580 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5581 }
5582 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5583 {
5584 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5585 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5586 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5587 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5588 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5589 }
5590#ifndef IN_RING3
5591 else if (fPostponeFail)
5592 {
5593 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5594 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5595 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5596 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5597 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5598 return iemSetPassUpStatus(pVCpu, rcStrict);
5599 }
5600#endif
5601 else
5602 {
5603 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5604 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5605 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5606 return rcStrict2;
5607 }
5608 }
5609 }
5610#ifndef IN_RING3
5611 else if (fPostponeFail)
5612 {
5613 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5614 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5615 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5616 if (!cbSecond)
5617 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5618 else
5619 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5620 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5621 return iemSetPassUpStatus(pVCpu, rcStrict);
5622 }
5623#endif
5624 else
5625 {
5626 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5627 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5628 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5629 return rcStrict;
5630 }
5631 }
5632 else
5633 {
5634 /*
5635 * No access handlers, much simpler.
5636 */
5637 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5638 if (RT_SUCCESS(rc))
5639 {
5640 if (cbSecond)
5641 {
5642 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5643 if (RT_SUCCESS(rc))
5644 { /* likely */ }
5645 else
5646 {
5647 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5648 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5649 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5650 return rc;
5651 }
5652 }
5653 }
5654 else
5655 {
5656 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5657 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5658 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5659 return rc;
5660 }
5661 }
5662 }
5663
5664#if defined(IEM_LOG_MEMORY_WRITES)
5665 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5666 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5667 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5668 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5669 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5670 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5671
5672 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5673 g_cbIemWrote = cbWrote;
5674 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5675#endif
5676
5677 /*
5678 * Free the mapping entry.
5679 */
5680 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5681 Assert(pVCpu->iem.s.cActiveMappings != 0);
5682 pVCpu->iem.s.cActiveMappings--;
5683 return VINF_SUCCESS;
5684}
5685
5686
5687/**
5688 * iemMemMap worker that deals with a request crossing pages.
5689 */
5690static VBOXSTRICTRC
5691iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5692{
5693 /*
5694 * Do the address translations.
5695 */
5696 RTGCPHYS GCPhysFirst;
5697 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5698 if (rcStrict != VINF_SUCCESS)
5699 return rcStrict;
5700
5701 RTGCPHYS GCPhysSecond;
5702 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5703 fAccess, &GCPhysSecond);
5704 if (rcStrict != VINF_SUCCESS)
5705 return rcStrict;
5706 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
5707
5708 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5709
5710 /*
5711 * Read in the current memory content if it's a read, execute or partial
5712 * write access.
5713 */
5714 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5715 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5716 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5717
5718 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5719 {
5720 if (!pVCpu->iem.s.fBypassHandlers)
5721 {
5722 /*
5723 * Must carefully deal with access handler status codes here,
5724 * which makes the code a bit bloated.
5725 */
5726 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5727 if (rcStrict == VINF_SUCCESS)
5728 {
5729 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5730 if (rcStrict == VINF_SUCCESS)
5731 { /*likely */ }
5732 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5733 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5734 else
5735 {
5736 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5737 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5738 return rcStrict;
5739 }
5740 }
5741 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5742 {
5743 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5744 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5745 {
5746 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5747 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5748 }
5749 else
5750 {
5751 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5752 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
5753 return rcStrict2;
5754 }
5755 }
5756 else
5757 {
5758 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5759 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5760 return rcStrict;
5761 }
5762 }
5763 else
5764 {
5765 /*
5766 * No informational status codes here, much more straightforward.
5767 */
5768 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5769 if (RT_SUCCESS(rc))
5770 {
5771 Assert(rc == VINF_SUCCESS);
5772 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5773 if (RT_SUCCESS(rc))
5774 Assert(rc == VINF_SUCCESS);
5775 else
5776 {
5777 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5778 return rc;
5779 }
5780 }
5781 else
5782 {
5783 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5784 return rc;
5785 }
5786 }
5787 }
5788#ifdef VBOX_STRICT
5789 else
5790 memset(pbBuf, 0xcc, cbMem);
5791 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5792 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5793#endif
5794 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
5795
5796 /*
5797 * Commit the bounce buffer entry.
5798 */
5799 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5800 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5801 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5802 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5803 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5804 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5805 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5806 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5807 pVCpu->iem.s.cActiveMappings++;
5808
5809 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5810 *ppvMem = pbBuf;
5811 return VINF_SUCCESS;
5812}
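
/*
 * How the cross-page split above carves up an access (sizes invented for
 * the example): an 8 byte access whose page offset is 0xffc on 4 KiB pages
 * becomes
 *
 *      cbFirstPage  = 0x1000 - 0xffc = 4;   // bytes on the first page
 *      cbSecondPage = 8 - 4          = 4;   // remainder on the second page
 *
 * Both pieces share one contiguous bounce buffer, so the instruction code
 * only ever sees a single linear mapping.
 */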
5813
5814
5815/**
5816 * iemMemMap worker that deals with iemMemPageMap failures.
5817 */
5818static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5819 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5820{
5821 /*
5822 * Filter out conditions we can handle and the ones which shouldn't happen.
5823 */
5824 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5825 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5826 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5827 {
5828 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5829 return rcMap;
5830 }
5831 pVCpu->iem.s.cPotentialExits++;
5832
5833 /*
5834 * Read in the current memory content if it's a read, execute or partial
5835 * write access.
5836 */
5837 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5838 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5839 {
5840 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5841 memset(pbBuf, 0xff, cbMem);
5842 else
5843 {
5844 int rc;
5845 if (!pVCpu->iem.s.fBypassHandlers)
5846 {
5847 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5848 if (rcStrict == VINF_SUCCESS)
5849 { /* nothing */ }
5850 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5851 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5852 else
5853 {
5854 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5855 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5856 return rcStrict;
5857 }
5858 }
5859 else
5860 {
5861 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5862 if (RT_SUCCESS(rc))
5863 { /* likely */ }
5864 else
5865 {
5866 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5867 GCPhysFirst, rc));
5868 return rc;
5869 }
5870 }
5871 }
5872 }
5873#ifdef VBOX_STRICT
5874 else
5875 memset(pbBuf, 0xcc, cbMem);
5876#endif
5877#ifdef VBOX_STRICT
5878 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5879 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5880#endif
5881
5882 /*
5883 * Commit the bounce buffer entry.
5884 */
5885 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5886 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5887 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5888 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5889 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5890 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5891 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5892 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5893 pVCpu->iem.s.cActiveMappings++;
5894
5895 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5896 *ppvMem = pbBuf;
5897 return VINF_SUCCESS;
5898}
5899
5900
5901
5902/**
5903 * Maps the specified guest memory for the given kind of access.
5904 *
5905 * This may be using bounce buffering of the memory if it's crossing a page
5906 * boundary or if there is an access handler installed for any of it. Because
5907 * of lock prefix guarantees, we're in for some extra clutter when this
5908 * happens.
5909 *
5910 * This may raise a \#GP, \#SS, \#PF or \#AC.
5911 *
5912 * @returns VBox strict status code.
5913 *
5914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5915 * @param ppvMem Where to return the pointer to the mapped memory.
5916 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
5917 * 8, 12, 16, 32 or 512. When used by string operations
5918 * it can be up to a page.
5919 * @param iSegReg The index of the segment register to use for this
5920 * access. The base and limits are checked. Use UINT8_MAX
5921 * to indicate that no segmentation is required (for IDT,
5922 * GDT and LDT accesses).
5923 * @param GCPtrMem The address of the guest memory.
5924 * @param fAccess How the memory is being accessed. The
5925 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
5926 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
5927 * when raising exceptions.
5928 * @param uAlignCtl Alignment control:
5929 * - Bits 15:0 is the alignment mask.
5930 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
5931 * IEM_MEMMAP_F_ALIGN_SSE, and
5932 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
5933 * Pass zero to skip alignment.
5934 */
5935VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5936 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
5937{
5938 /*
5939 * Check the input and figure out which mapping entry to use.
5940 */
5941 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
5942 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
5943 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
5944 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5945 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5946
5947 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5948 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5949 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5950 {
5951 iMemMap = iemMemMapFindFree(pVCpu);
5952 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5953 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5954 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
5955 pVCpu->iem.s.aMemMappings[2].fAccess),
5956 VERR_IEM_IPE_9);
5957 }
5958
5959 /*
5960 * Map the memory, checking that we can actually access it. If something
5961 * slightly complicated happens, fall back on bounce buffering.
5962 */
5963 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5964 if (rcStrict == VINF_SUCCESS)
5965 { /* likely */ }
5966 else
5967 return rcStrict;
5968
5969 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
5970 { /* likely */ }
5971 else
5972 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5973
5974 /*
5975 * Alignment check.
5976 */
5977 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
5978 { /* likelyish */ }
5979 else
5980 {
5981 /* Misaligned access. */
5982 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
5983 {
5984 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
5985 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
5986 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
5987 {
5988 AssertCompile(X86_CR0_AM == X86_EFL_AC);
5989
5990 if (iemMemAreAlignmentChecksEnabled(pVCpu))
5991 return iemRaiseAlignmentCheckException(pVCpu);
5992 }
5993 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
5994 && iemMemAreAlignmentChecksEnabled(pVCpu)
5995/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
5996 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
5997 )
5998 return iemRaiseAlignmentCheckException(pVCpu);
5999 else
6000 return iemRaiseGeneralProtectionFault0(pVCpu);
6001 }
6002 }
6003
6004#ifdef IEM_WITH_DATA_TLB
6005 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6006
6007 /*
6008 * Get the TLB entry for this page.
6009 */
6010 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6011 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6012 if (pTlbe->uTag == uTag)
6013 {
6014# ifdef VBOX_WITH_STATISTICS
6015 pVCpu->iem.s.DataTlb.cTlbHits++;
6016# endif
6017 }
6018 else
6019 {
6020 pVCpu->iem.s.DataTlb.cTlbMisses++;
6021 PGMPTWALK Walk;
6022 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6023 if (RT_FAILURE(rc))
6024 {
6025 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6026# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6027 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6028 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6029# endif
6030 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6031 }
6032
6033 Assert(Walk.fSucceeded);
6034 pTlbe->uTag = uTag;
6035 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6036 pTlbe->GCPhys = Walk.GCPhys;
6037 pTlbe->pbMappingR3 = NULL;
6038 }
6039
6040 /*
6041 * Check TLB page table level access flags.
6042 */
6043 /* If the page is either supervisor only or non-writable, we need to do
6044 more careful access checks. */
6045 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6046 {
6047 /* Write to read only memory? */
6048 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6049 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6050 && ( ( pVCpu->iem.s.uCpl == 3
6051 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6052 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6053 {
6054 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6055# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6056 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6057 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6058# endif
6059 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6060 }
6061
6062 /* Kernel memory accessed by userland? */
6063 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6064 && pVCpu->iem.s.uCpl == 3
6065 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6066 {
6067 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6068# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6069 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6070 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6071# endif
6072 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6073 }
6074 }
6075
6076 /*
6077 * Set the dirty / access flags.
6078 * ASSUMES this is set when the address is translated rather than on commit...
6079 */
6080 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6081 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6082 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6083 {
6084 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6085 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6086 AssertRC(rc2);
6087 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6088 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6089 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6090 }
6091
6092 /*
6093 * Look up the physical page info if necessary.
6094 */
6095 uint8_t *pbMem = NULL;
6096 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6097# ifdef IN_RING3
6098 pbMem = pTlbe->pbMappingR3;
6099# else
6100 pbMem = NULL;
6101# endif
6102 else
6103 {
6104 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6105 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6106 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6107 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6108 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6109 { /* likely */ }
6110 else
6111 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6112 pTlbe->pbMappingR3 = NULL;
6113 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6114 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6115 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6116 &pbMem, &pTlbe->fFlagsAndPhysRev);
6117 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6118# ifdef IN_RING3
6119 pTlbe->pbMappingR3 = pbMem;
6120# endif
6121 }
6122
6123 /*
6124 * Check the physical page level access and mapping.
6125 */
6126 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6127 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6128 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6129 { /* probably likely */ }
6130 else
6131 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6132 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6133 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6134 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6135 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6136 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6137
6138 if (pbMem)
6139 {
6140 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6141 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6142 fAccess |= IEM_ACCESS_NOT_LOCKED;
6143 }
6144 else
6145 {
6146 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6147 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6148 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6149 if (rcStrict != VINF_SUCCESS)
6150 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6151 }
6152
6153 void * const pvMem = pbMem;
6154
6155 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6156 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6157 if (fAccess & IEM_ACCESS_TYPE_READ)
6158 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6159
6160#else /* !IEM_WITH_DATA_TLB */
6161
6162 RTGCPHYS GCPhysFirst;
6163 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6164 if (rcStrict != VINF_SUCCESS)
6165 return rcStrict;
6166
6167 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6168 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6169 if (fAccess & IEM_ACCESS_TYPE_READ)
6170 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6171
6172 void *pvMem;
6173 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6174 if (rcStrict != VINF_SUCCESS)
6175 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6176
6177#endif /* !IEM_WITH_DATA_TLB */
6178
6179 /*
6180 * Fill in the mapping table entry.
6181 */
6182 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6183 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6184 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6185 pVCpu->iem.s.cActiveMappings += 1;
6186
6187 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6188 *ppvMem = pvMem;
6189
6190 return VINF_SUCCESS;
6191}
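

/**
 * Usage sketch for the mapping API above (hypothetical helper, not used by the
 * interpreter): an aligned 16 byte SSE-style store, mirroring the fetch/store
 * helpers further down.  The low 16 bits of uAlignCtl carry the alignment mask,
 * while IEM_MEMMAP_F_ALIGN_GP + IEM_MEMMAP_F_ALIGN_SSE raise \#GP(0) on
 * misalignment unless MXCSR.MM is set, in which case the \#AC rules apply
 * instead.
 */
static VBOXSTRICTRC iemMemStoreDataU128AlignedSseSketch(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                                        PCRTUINT128U pu128Value) RT_NOEXCEPT
{
    /* Map 16 bytes for writing; bounce buffering kicks in automatically if needed. */
    PRTUINT128U  pu128Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
                                (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    if (rc == VINF_SUCCESS)
    {
        pu128Dst->au64[0] = pu128Value->au64[0];
        pu128Dst->au64[1] = pu128Value->au64[1];
        /* Commit the write (flushing any bounce buffer) and release the mapping. */
        rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
    }
    return rc;
}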
6192
6193
6194/**
6195 * Commits the guest memory if bounce buffered and unmaps it.
6196 *
6197 * @returns Strict VBox status code.
6198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6199 * @param pvMem The mapping.
6200 * @param fAccess The kind of access.
6201 */
6202VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6203{
6204 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6205 AssertReturn(iMemMap >= 0, iMemMap);
6206
6207 /* If it's bounce buffered, we may need to write back the buffer. */
6208 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6209 {
6210 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6211 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6212 }
6213 /* Otherwise unlock it. */
6214 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6215 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6216
6217 /* Free the entry. */
6218 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6219 Assert(pVCpu->iem.s.cActiveMappings != 0);
6220 pVCpu->iem.s.cActiveMappings--;
6221 return VINF_SUCCESS;
6222}
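

/**
 * Read-modify-write usage sketch (hypothetical helper, not part of the
 * interpreter): a single mapping with combined read+write data access keeps the
 * lock prefix guarantees intact, and one commit writes everything back whether
 * the access was bounce buffered or not.  IEM_ACCESS_DATA_RW is assumed to be
 * the combined data read+write constant from IEMInternal.h.
 */
static VBOXSTRICTRC iemMemOrDataU32Sketch(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fOrMask) RT_NOEXCEPT
{
    uint32_t    *pu32Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_RW, sizeof(*pu32Dst) - 1);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst |= fOrMask; /* modify the mapped (or bounce buffered) dword in place */
        rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_RW);
    }
    return rc;
}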
6223
6224#ifdef IEM_WITH_SETJMP
6225
6226/**
6227 * Maps the specified guest memory for the given kind of access, longjmp on
6228 * error.
6229 *
6230 * This may be using bounce buffering of the memory if it's crossing a page
6231 * boundary or if there is an access handler installed for any of it. Because
6232 * of lock prefix guarantees, we're in for some extra clutter when this
6233 * happens.
6234 *
6235 * This may raise a \#GP, \#SS, \#PF or \#AC.
6236 *
6237 * @returns Pointer to the mapped memory.
6238 *
6239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6240 * @param cbMem The number of bytes to map. This is usually 1,
6241 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6242 * string operations it can be up to a page.
6243 * @param iSegReg The index of the segment register to use for
6244 * this access. The base and limits are checked.
6245 * Use UINT8_MAX to indicate that no segmentation
6246 * is required (for IDT, GDT and LDT accesses).
6247 * @param GCPtrMem The address of the guest memory.
6248 * @param fAccess How the memory is being accessed. The
6249 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6250 * how to map the memory, while the
6251 * IEM_ACCESS_WHAT_XXX bit is used when raising
6252 * exceptions.
6253 * @param uAlignCtl Alignment control:
6254 * - Bits 15:0 is the alignment mask.
6255 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6256 * IEM_MEMMAP_F_ALIGN_SSE, and
6257 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6258 * Pass zero to skip alignment.
6259 */
6260void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6261 uint32_t uAlignCtl) RT_NOEXCEPT
6262{
6263 /*
6264 * Check the input, check segment access and adjust address
6265 * with segment base.
6266 */
6267 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6268 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6269 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6270
6271 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6272 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6273 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6274
6275 /*
6276 * Alignment check.
6277 */
6278 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6279 { /* likelyish */ }
6280 else
6281 {
6282 /* Misaligned access. */
6283 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6284 {
6285 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6286 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6287 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6288 {
6289 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6290
6291 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6292 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6293 }
6294 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6295 && iemMemAreAlignmentChecksEnabled(pVCpu)
6296/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6297 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
6298 )
6299 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6300 else
6301 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6302 }
6303 }
6304
6305 /*
6306 * Figure out which mapping entry to use.
6307 */
6308 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6309 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6310 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6311 {
6312 iMemMap = iemMemMapFindFree(pVCpu);
6313 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6314 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6315 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6316 pVCpu->iem.s.aMemMappings[2].fAccess),
6317 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
6318 }
6319
6320 /*
6321 * Crossing a page boundary?
6322 */
6323 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6324 { /* No (likely). */ }
6325 else
6326 {
6327 void *pvMem;
6328 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6329 if (rcStrict == VINF_SUCCESS)
6330 return pvMem;
6331 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6332 }
6333
6334#ifdef IEM_WITH_DATA_TLB
6335 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6336
6337 /*
6338 * Get the TLB entry for this page.
6339 */
6340 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6341 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6342 if (pTlbe->uTag == uTag)
6343 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6344 else
6345 {
6346 pVCpu->iem.s.DataTlb.cTlbMisses++;
6347 PGMPTWALK Walk;
6348 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6349 if (RT_FAILURE(rc))
6350 {
6351 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6352# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6353 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6354 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6355# endif
6356 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6357 }
6358
6359 Assert(Walk.fSucceeded);
6360 pTlbe->uTag = uTag;
6361 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6362 pTlbe->GCPhys = Walk.GCPhys;
6363 pTlbe->pbMappingR3 = NULL;
6364 }
6365
6366 /*
6367 * Check the flags and physical revision.
6368 */
6369 /** @todo make the caller pass these in with fAccess. */
6370 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6371 ? IEMTLBE_F_PT_NO_USER : 0;
6372 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6373 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6374 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6375 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6376 ? IEMTLBE_F_PT_NO_WRITE : 0)
6377 : 0;
6378 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6379 uint8_t *pbMem = NULL;
6380 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6381 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6382# ifdef IN_RING3
6383 pbMem = pTlbe->pbMappingR3;
6384# else
6385 pbMem = NULL;
6386# endif
6387 else
6388 {
6389 /*
6390 * Okay, something isn't quite right or needs refreshing.
6391 */
6392 /* Write to read only memory? */
6393 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6394 {
6395 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6396# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6397 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6398 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6399# endif
6400 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6401 }
6402
6403 /* Kernel memory accessed by userland? */
6404 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6405 {
6406 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6407# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6408 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6409 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6410# endif
6411 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6412 }
6413
6414 /* Set the dirty / access flags.
6415 ASSUMES this is set when the address is translated rather than on commit... */
6416 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6417 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6418 {
6419 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6420 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6421 AssertRC(rc2);
6422 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6423 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6424 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6425 }
6426
6427 /*
6428 * Check if the physical page info needs updating.
6429 */
6430 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6431# ifdef IN_RING3
6432 pbMem = pTlbe->pbMappingR3;
6433# else
6434 pbMem = NULL;
6435# endif
6436 else
6437 {
6438 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6439 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6440 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6441 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6442 pTlbe->pbMappingR3 = NULL;
6443 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6444 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6445 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6446 &pbMem, &pTlbe->fFlagsAndPhysRev);
6447 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6448# ifdef IN_RING3
6449 pTlbe->pbMappingR3 = pbMem;
6450# endif
6451 }
6452
6453 /*
6454 * Check the physical page level access and mapping.
6455 */
6456 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6457 { /* probably likely */ }
6458 else
6459 {
6460 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6461 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6462 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6463 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6464 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6465 if (rcStrict == VINF_SUCCESS)
6466 return pbMem;
6467 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6468 }
6469 }
6470 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6471
6472 if (pbMem)
6473 {
6474 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6475 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6476 fAccess |= IEM_ACCESS_NOT_LOCKED;
6477 }
6478 else
6479 {
6480 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6481 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6482 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6483 if (rcStrict == VINF_SUCCESS)
6484 return pbMem;
6485 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6486 }
6487
6488 void * const pvMem = pbMem;
6489
6490 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6491 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6492 if (fAccess & IEM_ACCESS_TYPE_READ)
6493 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6494
6495#else /* !IEM_WITH_DATA_TLB */
6496
6497
6498 RTGCPHYS GCPhysFirst;
6499 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6500 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6501 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6502
6503 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6504 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6505 if (fAccess & IEM_ACCESS_TYPE_READ)
6506 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6507
6508 void *pvMem;
6509 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6510 if (rcStrict == VINF_SUCCESS)
6511 { /* likely */ }
6512 else
6513 {
6514 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6515 if (rcStrict == VINF_SUCCESS)
6516 return pvMem;
6517 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6518 }
6519
6520#endif /* !IEM_WITH_DATA_TLB */
6521
6522 /*
6523 * Fill in the mapping table entry.
6524 */
6525 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6526 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6527 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6528 pVCpu->iem.s.cActiveMappings++;
6529
6530 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6531 return pvMem;
6532}
6533
6534
6535/**
6536 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6537 *
6538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6539 * @param pvMem The mapping.
6540 * @param fAccess The kind of access.
6541 */
6542void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6543{
6544 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6545 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
6546
6547 /* If it's bounce buffered, we may need to write back the buffer. */
6548 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6549 {
6550 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6551 {
6552 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6553 if (rcStrict == VINF_SUCCESS)
6554 return;
6555 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6556 }
6557 }
6558 /* Otherwise unlock it. */
6559 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6560 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6561
6562 /* Free the entry. */
6563 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6564 Assert(pVCpu->iem.s.cActiveMappings != 0);
6565 pVCpu->iem.s.cActiveMappings--;
6566}
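

/**
 * Sketch of the guard the callers of the longjmp variants are assumed to have in
 * place (hypothetical function; the real set-up lives in the IEMExec* entry
 * points and relies on the setjmp.h machinery pulled in for IEM_WITH_SETJMP
 * builds): a jump buffer is installed in pVCpu->iem.s.CTX_SUFF(pJmpBuf), a
 * raised \#GP/\#SS/\#PF/\#AC unwinds back to the setjmp, and any mappings still
 * active are rolled back.
 */
static VBOXSTRICTRC iemMemStoreDataU16GuardedSketch(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                                    uint16_t u16Value) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict;
    jmp_buf      JmpBuf;
    jmp_buf     *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
    if ((rcStrict = setjmp(JmpBuf)) == 0)
    {
        /* Normal path: any exception inside these calls longjmps out with a strict status. */
        uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
                                                     IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
        *pu16Dst = u16Value;
        iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
    }
    /* Exception path: release whatever page locks and mappings are still active. */
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
    return rcStrict;
}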
6567
6568#endif /* IEM_WITH_SETJMP */
6569
6570#ifndef IN_RING3
6571/**
6572 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6573 * buffer part runs into trouble, the write is postponed to ring-3 (by setting force flags).
6574 *
6575 * Allows the instruction to be completed and retired, while the IEM user will
6576 * return to ring-3 immediately afterwards and do the postponed writes there.
6577 *
6578 * @returns VBox status code (no strict statuses). Caller must check
6579 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6581 * @param pvMem The mapping.
6582 * @param fAccess The kind of access.
6583 */
6584VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6585{
6586 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6587 AssertReturn(iMemMap >= 0, iMemMap);
6588
6589 /* If it's bounce buffered, we may need to write back the buffer. */
6590 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6591 {
6592 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6593 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6594 }
6595 /* Otherwise unlock it. */
6596 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6597 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6598
6599 /* Free the entry. */
6600 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6601 Assert(pVCpu->iem.s.cActiveMappings != 0);
6602 pVCpu->iem.s.cActiveMappings--;
6603 return VINF_SUCCESS;
6604}
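

/**
 * Usage sketch for the postponing variant above (hypothetical helper): a string
 * instruction worker commits its chunk and only loops again when nothing was
 * postponed, which is what the VMCPU_FF_IEM force flag is assumed to signal.
 */
static VBOXSTRICTRC iemMemCommitAndMaybeContinueSketch(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess,
                                                       bool *pfContinue) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, fAccess);
    /* Repeat the string instruction only if the commit went through immediately
       and no postponed-write work is pending for ring-3. */
    *pfContinue = rcStrict == VINF_SUCCESS
               && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM);
    return rcStrict;
}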
6605#endif
6606
6607
6608/**
6609 * Rolls back mappings, releasing page locks and such.
6610 *
6611 * The caller shall only call this after checking cActiveMappings.
6612 *
6613 * @returns Strict VBox status code to pass up.
6614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6615 */
6616void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6617{
6618 Assert(pVCpu->iem.s.cActiveMappings > 0);
6619
6620 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6621 while (iMemMap-- > 0)
6622 {
6623 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6624 if (fAccess != IEM_ACCESS_INVALID)
6625 {
6626 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6627 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6628 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6629 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6630 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6631 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6632 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6633 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6634 pVCpu->iem.s.cActiveMappings--;
6635 }
6636 }
6637}
6638
6639
6640/**
6641 * Fetches a data byte.
6642 *
6643 * @returns Strict VBox status code.
6644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6645 * @param pu8Dst Where to return the byte.
6646 * @param iSegReg The index of the segment register to use for
6647 * this access. The base and limits are checked.
6648 * @param GCPtrMem The address of the guest memory.
6649 */
6650VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6651{
6652 /* The lazy approach for now... */
6653 uint8_t const *pu8Src;
6654 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6655 if (rc == VINF_SUCCESS)
6656 {
6657 *pu8Dst = *pu8Src;
6658 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6659 }
6660 return rc;
6661}
6662
6663
6664#ifdef IEM_WITH_SETJMP
6665/**
6666 * Fetches a data byte, longjmp on error.
6667 *
6668 * @returns The byte.
6669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6670 * @param iSegReg The index of the segment register to use for
6671 * this access. The base and limits are checked.
6672 * @param GCPtrMem The address of the guest memory.
6673 */
6674uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6675{
6676 /* The lazy approach for now... */
6677 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6678 uint8_t const bRet = *pu8Src;
6679 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6680 return bRet;
6681}
6682#endif /* IEM_WITH_SETJMP */
6683
6684
6685/**
6686 * Fetches a data word.
6687 *
6688 * @returns Strict VBox status code.
6689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6690 * @param pu16Dst Where to return the word.
6691 * @param iSegReg The index of the segment register to use for
6692 * this access. The base and limits are checked.
6693 * @param GCPtrMem The address of the guest memory.
6694 */
6695VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6696{
6697 /* The lazy approach for now... */
6698 uint16_t const *pu16Src;
6699 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6700 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6701 if (rc == VINF_SUCCESS)
6702 {
6703 *pu16Dst = *pu16Src;
6704 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6705 }
6706 return rc;
6707}
6708
6709
6710#ifdef IEM_WITH_SETJMP
6711/**
6712 * Fetches a data word, longjmp on error.
6713 *
6714 * @returns The word.
6715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6716 * @param iSegReg The index of the segment register to use for
6717 * this access. The base and limits are checked.
6718 * @param GCPtrMem The address of the guest memory.
6719 */
6720uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6721{
6722 /* The lazy approach for now... */
6723 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6724 sizeof(*pu16Src) - 1);
6725 uint16_t const u16Ret = *pu16Src;
6726 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6727 return u16Ret;
6728}
6729#endif
6730
6731
6732/**
6733 * Fetches a data dword.
6734 *
6735 * @returns Strict VBox status code.
6736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6737 * @param pu32Dst Where to return the dword.
6738 * @param iSegReg The index of the segment register to use for
6739 * this access. The base and limits are checked.
6740 * @param GCPtrMem The address of the guest memory.
6741 */
6742VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6743{
6744 /* The lazy approach for now... */
6745 uint32_t const *pu32Src;
6746 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6747 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6748 if (rc == VINF_SUCCESS)
6749 {
6750 *pu32Dst = *pu32Src;
6751 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6752 }
6753 return rc;
6754}
6755
6756
6757/**
6758 * Fetches a data dword and zero extends it to a qword.
6759 *
6760 * @returns Strict VBox status code.
6761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6762 * @param pu64Dst Where to return the qword.
6763 * @param iSegReg The index of the segment register to use for
6764 * this access. The base and limits are checked.
6765 * @param GCPtrMem The address of the guest memory.
6766 */
6767VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6768{
6769 /* The lazy approach for now... */
6770 uint32_t const *pu32Src;
6771 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6772 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6773 if (rc == VINF_SUCCESS)
6774 {
6775 *pu64Dst = *pu32Src;
6776 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6777 }
6778 return rc;
6779}
6780
6781
6782#ifdef IEM_WITH_SETJMP
6783
6784/**
6785 * Fetches a data dword, longjmp on error, fallback/safe version.
6786 *
6787 * @returns The dword.
6788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6789 * @param iSegReg The index of the segment register to use for
6790 * this access. The base and limits are checked.
6791 * @param GCPtrMem The address of the guest memory.
6792 */
6793uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6794{
6795 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6796 sizeof(*pu32Src) - 1);
6797 uint32_t const u32Ret = *pu32Src;
6798 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6799 return u32Ret;
6800}
6801
6802
6803/**
6804 * Fetches a data dword, longjmp on error.
6805 *
6806 * @returns The dword.
6807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6808 * @param iSegReg The index of the segment register to use for
6809 * this access. The base and limits are checked.
6810 * @param GCPtrMem The address of the guest memory.
6811 */
6812uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6813{
6814# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6815 /*
6816 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6817 */
6818 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6819 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6820 {
6821 /*
6822 * TLB lookup.
6823 */
6824 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6825 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6826 if (pTlbe->uTag == uTag)
6827 {
6828 /*
6829 * Check TLB page table level access flags.
6830 */
6831 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6832 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6833 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6834 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6835 {
6836 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6837
6838 /*
6839 * Alignment check:
6840 */
6841 /** @todo check priority \#AC vs \#PF */
6842 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
6843 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6844 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
6845 || pVCpu->iem.s.uCpl != 3)
6846 {
6847 /*
6848 * Fetch and return the dword
6849 */
6850 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
6851 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
6852 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
6853 }
6854 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
6855 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6856 }
6857 }
6858 }
6859
6860 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
6861 outdated page pointer, or other troubles. */
6862 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
6863 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6864
6865# else
6866 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
6867 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6868 uint32_t const u32Ret = *pu32Src;
6869 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6870 return u32Ret;
6871# endif
6872}
6873#endif
6874
6875
6876#ifdef SOME_UNUSED_FUNCTION
6877/**
6878 * Fetches a data dword and sign extends it to a qword.
6879 *
6880 * @returns Strict VBox status code.
6881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6882 * @param pu64Dst Where to return the sign extended value.
6883 * @param iSegReg The index of the segment register to use for
6884 * this access. The base and limits are checked.
6885 * @param GCPtrMem The address of the guest memory.
6886 */
6887VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6888{
6889 /* The lazy approach for now... */
6890 int32_t const *pi32Src;
6891 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
6892 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
6893 if (rc == VINF_SUCCESS)
6894 {
6895 *pu64Dst = *pi32Src;
6896 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6897 }
6898#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6899 else
6900 *pu64Dst = 0;
6901#endif
6902 return rc;
6903}
6904#endif
6905
6906
6907/**
6908 * Fetches a data qword.
6909 *
6910 * @returns Strict VBox status code.
6911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6912 * @param pu64Dst Where to return the qword.
6913 * @param iSegReg The index of the segment register to use for
6914 * this access. The base and limits are checked.
6915 * @param GCPtrMem The address of the guest memory.
6916 */
6917VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6918{
6919 /* The lazy approach for now... */
6920 uint64_t const *pu64Src;
6921 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6922 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6923 if (rc == VINF_SUCCESS)
6924 {
6925 *pu64Dst = *pu64Src;
6926 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6927 }
6928 return rc;
6929}
6930
6931
6932#ifdef IEM_WITH_SETJMP
6933/**
6934 * Fetches a data qword, longjmp on error.
6935 *
6936 * @returns The qword.
6937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6938 * @param iSegReg The index of the segment register to use for
6939 * this access. The base and limits are checked.
6940 * @param GCPtrMem The address of the guest memory.
6941 */
6942uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6943{
6944 /* The lazy approach for now... */
6945 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
6946 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6947 uint64_t const u64Ret = *pu64Src;
6948 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6949 return u64Ret;
6950}
6951#endif
6952
6953
6954/**
6955 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6956 *
6957 * @returns Strict VBox status code.
6958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6959 * @param pu64Dst Where to return the qword.
6960 * @param iSegReg The index of the segment register to use for
6961 * this access. The base and limits are checked.
6962 * @param GCPtrMem The address of the guest memory.
6963 */
6964VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6965{
6966 /* The lazy approach for now... */
6967 uint64_t const *pu64Src;
6968 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6969 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6970 if (rc == VINF_SUCCESS)
6971 {
6972 *pu64Dst = *pu64Src;
6973 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6974 }
6975 return rc;
6976}
6977
6978
6979#ifdef IEM_WITH_SETJMP
6980/**
6981 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
6982 *
6983 * @returns The qword.
6984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6985 * @param iSegReg The index of the segment register to use for
6986 * this access. The base and limits are checked.
6987 * @param GCPtrMem The address of the guest memory.
6988 */
6989uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6990{
6991 /* The lazy approach for now... */
6992 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6993 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6994 uint64_t const u64Ret = *pu64Src;
6995 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6996 return u64Ret;
6997}
6998#endif
6999
7000
7001/**
7002 * Fetches a data tword.
7003 *
7004 * @returns Strict VBox status code.
7005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7006 * @param pr80Dst Where to return the tword.
7007 * @param iSegReg The index of the segment register to use for
7008 * this access. The base and limits are checked.
7009 * @param GCPtrMem The address of the guest memory.
7010 */
7011VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7012{
7013 /* The lazy approach for now... */
7014 PCRTFLOAT80U pr80Src;
7015 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7016 if (rc == VINF_SUCCESS)
7017 {
7018 *pr80Dst = *pr80Src;
7019 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7020 }
7021 return rc;
7022}
7023
7024
7025#ifdef IEM_WITH_SETJMP
7026/**
7027 * Fetches a data tword, longjmp on error.
7028 *
7029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7030 * @param pr80Dst Where to return the tword.
7031 * @param iSegReg The index of the segment register to use for
7032 * this access. The base and limits are checked.
7033 * @param GCPtrMem The address of the guest memory.
7034 */
7035void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7036{
7037 /* The lazy approach for now... */
7038 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7039 *pr80Dst = *pr80Src;
7040 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7041}
7042#endif
7043
7044
7045/**
7046 * Fetches a data decimal tword.
7047 *
7048 * @returns Strict VBox status code.
7049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7050 * @param pd80Dst Where to return the tword.
7051 * @param iSegReg The index of the segment register to use for
7052 * this access. The base and limits are checked.
7053 * @param GCPtrMem The address of the guest memory.
7054 */
7055VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7056{
7057 /* The lazy approach for now... */
7058 PCRTPBCD80U pd80Src;
7059 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7060 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7061 if (rc == VINF_SUCCESS)
7062 {
7063 *pd80Dst = *pd80Src;
7064 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7065 }
7066 return rc;
7067}
7068
7069
7070#ifdef IEM_WITH_SETJMP
7071/**
7072 * Fetches a data decimal tword, longjmp on error.
7073 *
7074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7075 * @param pd80Dst Where to return the tword.
7076 * @param iSegReg The index of the segment register to use for
7077 * this access. The base and limits are checked.
7078 * @param GCPtrMem The address of the guest memory.
7079 */
7080void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7081{
7082 /* The lazy approach for now... */
7083 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7084 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7085 *pd80Dst = *pd80Src;
7086 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7087}
7088#endif
7089
7090
7091/**
7092 * Fetches a data dqword (double qword), generally SSE related.
7093 *
7094 * @returns Strict VBox status code.
7095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7096 * @param pu128Dst Where to return the dqword.
7097 * @param iSegReg The index of the segment register to use for
7098 * this access. The base and limits are checked.
7099 * @param GCPtrMem The address of the guest memory.
7100 */
7101VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7102{
7103 /* The lazy approach for now... */
7104 PCRTUINT128U pu128Src;
7105 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7106 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7107 if (rc == VINF_SUCCESS)
7108 {
7109 pu128Dst->au64[0] = pu128Src->au64[0];
7110 pu128Dst->au64[1] = pu128Src->au64[1];
7111 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7112 }
7113 return rc;
7114}
7115
7116
7117#ifdef IEM_WITH_SETJMP
7118/**
7119 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
7120 *
7121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7122 * @param pu128Dst Where to return the dqword.
7123 * @param iSegReg The index of the segment register to use for
7124 * this access. The base and limits are checked.
7125 * @param GCPtrMem The address of the guest memory.
7126 */
7127void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7128{
7129 /* The lazy approach for now... */
7130 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7131 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7132 pu128Dst->au64[0] = pu128Src->au64[0];
7133 pu128Dst->au64[1] = pu128Src->au64[1];
7134 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7135}
7136#endif
7137
7138
7139/**
7140 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7141 * related.
7142 *
7143 * Raises \#GP(0) if not aligned.
7144 *
7145 * @returns Strict VBox status code.
7146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7147 * @param pu128Dst Where to return the dqword.
7148 * @param iSegReg The index of the segment register to use for
7149 * this access. The base and limits are checked.
7150 * @param GCPtrMem The address of the guest memory.
7151 */
7152VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7153{
7154 /* The lazy approach for now... */
7155 PCRTUINT128U pu128Src;
7156 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7157 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7158 if (rc == VINF_SUCCESS)
7159 {
7160 pu128Dst->au64[0] = pu128Src->au64[0];
7161 pu128Dst->au64[1] = pu128Src->au64[1];
7162 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7163 }
7164 return rc;
7165}
7166
7167
7168#ifdef IEM_WITH_SETJMP
7169/**
7170 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7171 * related, longjmp on error.
7172 *
7173 * Raises \#GP(0) if not aligned.
7174 *
7175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7176 * @param pu128Dst Where to return the dqword.
7177 * @param iSegReg The index of the segment register to use for
7178 * this access. The base and limits are checked.
7179 * @param GCPtrMem The address of the guest memory.
7180 */
7181void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7182{
7183 /* The lazy approach for now... */
7184 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7185 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7186 pu128Dst->au64[0] = pu128Src->au64[0];
7187 pu128Dst->au64[1] = pu128Src->au64[1];
7188 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7189}
7190#endif
7191
7192
7193/**
7194 * Fetches a data oword (octo word), generally AVX related.
7195 *
7196 * @returns Strict VBox status code.
7197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7198 * @param pu256Dst Where to return the oword.
7199 * @param iSegReg The index of the segment register to use for
7200 * this access. The base and limits are checked.
7201 * @param GCPtrMem The address of the guest memory.
7202 */
7203VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7204{
7205 /* The lazy approach for now... */
7206 PCRTUINT256U pu256Src;
7207 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7208 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7209 if (rc == VINF_SUCCESS)
7210 {
7211 pu256Dst->au64[0] = pu256Src->au64[0];
7212 pu256Dst->au64[1] = pu256Src->au64[1];
7213 pu256Dst->au64[2] = pu256Src->au64[2];
7214 pu256Dst->au64[3] = pu256Src->au64[3];
7215 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7216 }
7217 return rc;
7218}
7219
7220
7221#ifdef IEM_WITH_SETJMP
7222/**
7223 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7224 *
7225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7226 * @param pu256Dst Where to return the oword.
7227 * @param iSegReg The index of the segment register to use for
7228 * this access. The base and limits are checked.
7229 * @param GCPtrMem The address of the guest memory.
7230 */
7231void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7232{
7233 /* The lazy approach for now... */
7234 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7235 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7236 pu256Dst->au64[0] = pu256Src->au64[0];
7237 pu256Dst->au64[1] = pu256Src->au64[1];
7238 pu256Dst->au64[2] = pu256Src->au64[2];
7239 pu256Dst->au64[3] = pu256Src->au64[3];
7240 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7241}
7242#endif
7243
7244
7245/**
7246 * Fetches a data oword (octo word) at an aligned address, generally AVX
7247 * related.
7248 *
7249 * Raises \#GP(0) if not aligned.
7250 *
7251 * @returns Strict VBox status code.
7252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7253 * @param pu256Dst Where to return the oword.
7254 * @param iSegReg The index of the segment register to use for
7255 * this access. The base and limits are checked.
7256 * @param GCPtrMem The address of the guest memory.
7257 */
7258VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7259{
7260 /* The lazy approach for now... */
7261 PCRTUINT256U pu256Src;
7262 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7263 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7264 if (rc == VINF_SUCCESS)
7265 {
7266 pu256Dst->au64[0] = pu256Src->au64[0];
7267 pu256Dst->au64[1] = pu256Src->au64[1];
7268 pu256Dst->au64[2] = pu256Src->au64[2];
7269 pu256Dst->au64[3] = pu256Src->au64[3];
7270 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7271 }
7272 return rc;
7273}
7274
7275
7276#ifdef IEM_WITH_SETJMP
7277/**
7278 * Fetches a data oword (octo word) at an aligned address, generally AVX
7279 * related, longjmp on error.
7280 *
7281 * Raises \#GP(0) if not aligned.
7282 *
7283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7284 * @param pu256Dst Where to return the oword.
7285 * @param iSegReg The index of the segment register to use for
7286 * this access. The base and limits are checked.
7287 * @param GCPtrMem The address of the guest memory.
7288 */
7289void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7290{
7291 /* The lazy approach for now... */
7292 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7293 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7294 pu256Dst->au64[0] = pu256Src->au64[0];
7295 pu256Dst->au64[1] = pu256Src->au64[1];
7296 pu256Dst->au64[2] = pu256Src->au64[2];
7297 pu256Dst->au64[3] = pu256Src->au64[3];
7298 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7299}
7300#endif
7301
7302
7303
7304/**
7305 * Fetches a descriptor register (lgdt, lidt).
7306 *
7307 * @returns Strict VBox status code.
7308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7309 * @param pcbLimit Where to return the limit.
7310 * @param pGCPtrBase Where to return the base.
7311 * @param iSegReg The index of the segment register to use for
7312 * this access. The base and limits are checked.
7313 * @param GCPtrMem The address of the guest memory.
7314 * @param enmOpSize The effective operand size.
7315 */
7316VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7317 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7318{
7319 /*
7320 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7321 * little special:
7322 * - The two reads are done separately.
7323 * - The operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7324 * - We suspect the 386 to actually commit the limit before the base in
7325 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7326 * don't try to emulate this eccentric behavior, because it's not well
7327 * enough understood and rather hard to trigger.
7328 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7329 */
7330 VBOXSTRICTRC rcStrict;
7331 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7332 {
7333 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7334 if (rcStrict == VINF_SUCCESS)
7335 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7336 }
7337 else
7338 {
7339 uint32_t uTmp = 0; /* (Silences a Visual C++ potentially-uninitialized warning.) */
7340 if (enmOpSize == IEMMODE_32BIT)
7341 {
7342 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7343 {
7344 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7345 if (rcStrict == VINF_SUCCESS)
7346 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7347 }
7348 else
7349 {
7350 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7351 if (rcStrict == VINF_SUCCESS)
7352 {
7353 *pcbLimit = (uint16_t)uTmp;
7354 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7355 }
7356 }
7357 if (rcStrict == VINF_SUCCESS)
7358 *pGCPtrBase = uTmp;
7359 }
7360 else
7361 {
7362 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7363 if (rcStrict == VINF_SUCCESS)
7364 {
7365 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7366 if (rcStrict == VINF_SUCCESS)
7367 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7368 }
7369 }
7370 }
7371 return rcStrict;
7372}
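

/**
 * Usage sketch for the descriptor-table fetcher above (hypothetical worker,
 * names invented): an LGDT/LIDT style implementation pulls in the limit and the
 * base with one call and then acts on the pair.
 */
static VBOXSTRICTRC iemFetchXdtrOperandSketch(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEffSrc, IEMMODE enmEffOpSize,
                                              uint16_t *pcbLimit, PRTGCPTR pGCPtrBase) RT_NOEXCEPT
{
    /* One call does both guest reads: the 16-bit limit, then the 24/32/64-bit base. */
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, pcbLimit, pGCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
    if (rcStrict == VINF_SUCCESS)
        Log(("iemFetchXdtrOperandSketch: limit=%#x base=%RGv\n", *pcbLimit, *pGCPtrBase));
    return rcStrict;
}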
7373
7374
7375
7376/**
7377 * Stores a data byte.
7378 *
7379 * @returns Strict VBox status code.
7380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7381 * @param iSegReg The index of the segment register to use for
7382 * this access. The base and limits are checked.
7383 * @param GCPtrMem The address of the guest memory.
7384 * @param u8Value The value to store.
7385 */
7386VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7387{
7388 /* The lazy approach for now... */
7389 uint8_t *pu8Dst;
7390 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7391 if (rc == VINF_SUCCESS)
7392 {
7393 *pu8Dst = u8Value;
7394 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7395 }
7396 return rc;
7397}
7398
7399
7400#ifdef IEM_WITH_SETJMP
7401/**
7402 * Stores a data byte, longjmp on error.
7403 *
7404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7405 * @param iSegReg The index of the segment register to use for
7406 * this access. The base and limits are checked.
7407 * @param GCPtrMem The address of the guest memory.
7408 * @param u8Value The value to store.
7409 */
7410void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7411{
7412 /* The lazy approach for now... */
7413 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7414 *pu8Dst = u8Value;
7415 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7416}
7417#endif
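
/*
 * Illustrative sketch only (not built): the store helpers above all follow the
 * same map / modify / commit pattern.  This hypothetical helper shows the
 * read-modify-write flavour of that pattern; IEM_ACCESS_DATA_RW is assumed to
 * be the matching read-write access flag (only the _W variant is used above).
 */
#if 0
static VBOXSTRICTRC iemExampleOrDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t fOrMask)
{
    uint8_t *pu8Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW, 0);
    if (rc == VINF_SUCCESS)
    {
        *pu8Dst |= fOrMask;     /* The old value is visible because the mapping is read-write. */
        rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_RW);
    }
    return rc;
}
#endif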
7418
7419
7420/**
7421 * Stores a data word.
7422 *
7423 * @returns Strict VBox status code.
7424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7425 * @param iSegReg The index of the segment register to use for
7426 * this access. The base and limits are checked.
7427 * @param GCPtrMem The address of the guest memory.
7428 * @param u16Value The value to store.
7429 */
7430VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7431{
7432 /* The lazy approach for now... */
7433 uint16_t *pu16Dst;
7434 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7435 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7436 if (rc == VINF_SUCCESS)
7437 {
7438 *pu16Dst = u16Value;
7439 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7440 }
7441 return rc;
7442}
7443
7444
7445#ifdef IEM_WITH_SETJMP
7446/**
7447 * Stores a data word, longjmp on error.
7448 *
7449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7450 * @param iSegReg The index of the segment register to use for
7451 * this access. The base and limits are checked.
7452 * @param GCPtrMem The address of the guest memory.
7453 * @param u16Value The value to store.
7454 */
7455void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7456{
7457 /* The lazy approach for now... */
7458 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7459 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7460 *pu16Dst = u16Value;
7461 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7462}
7463#endif
7464
7465
7466/**
7467 * Stores a data dword.
7468 *
7469 * @returns Strict VBox status code.
7470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7471 * @param iSegReg The index of the segment register to use for
7472 * this access. The base and limits are checked.
7473 * @param GCPtrMem The address of the guest memory.
7474 * @param u32Value The value to store.
7475 */
7476VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7477{
7478 /* The lazy approach for now... */
7479 uint32_t *pu32Dst;
7480 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7481 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7482 if (rc == VINF_SUCCESS)
7483 {
7484 *pu32Dst = u32Value;
7485 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7486 }
7487 return rc;
7488}
7489
7490
7491#ifdef IEM_WITH_SETJMP
7492/**
7493 * Stores a data dword, longjmp on error.
7494 *
7496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7497 * @param iSegReg The index of the segment register to use for
7498 * this access. The base and limits are checked.
7499 * @param GCPtrMem The address of the guest memory.
7500 * @param u32Value The value to store.
7501 */
7502void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7503{
7504 /* The lazy approach for now... */
7505 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7506 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7507 *pu32Dst = u32Value;
7508 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7509}
7510#endif
7511
7512
7513/**
7514 * Stores a data qword.
7515 *
7516 * @returns Strict VBox status code.
7517 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7518 * @param iSegReg The index of the segment register to use for
7519 * this access. The base and limits are checked.
7520 * @param GCPtrMem The address of the guest memory.
7521 * @param u64Value The value to store.
7522 */
7523VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7524{
7525 /* The lazy approach for now... */
7526 uint64_t *pu64Dst;
7527 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7528 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7529 if (rc == VINF_SUCCESS)
7530 {
7531 *pu64Dst = u64Value;
7532 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7533 }
7534 return rc;
7535}
7536
7537
7538#ifdef IEM_WITH_SETJMP
7539/**
7540 * Stores a data qword, longjmp on error.
7541 *
7542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7543 * @param iSegReg The index of the segment register to use for
7544 * this access. The base and limits are checked.
7545 * @param GCPtrMem The address of the guest memory.
7546 * @param u64Value The value to store.
7547 */
7548void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7549{
7550 /* The lazy approach for now... */
7551 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7552 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7553 *pu64Dst = u64Value;
7554 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7555}
7556#endif
7557
7558
7559/**
7560 * Stores a data dqword.
7561 *
7562 * @returns Strict VBox status code.
7563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7564 * @param iSegReg The index of the segment register to use for
7565 * this access. The base and limits are checked.
7566 * @param GCPtrMem The address of the guest memory.
7567 * @param u128Value The value to store.
7568 */
7569VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7570{
7571 /* The lazy approach for now... */
7572 PRTUINT128U pu128Dst;
7573 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7574 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7575 if (rc == VINF_SUCCESS)
7576 {
7577 pu128Dst->au64[0] = u128Value.au64[0];
7578 pu128Dst->au64[1] = u128Value.au64[1];
7579 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7580 }
7581 return rc;
7582}
7583
7584
7585#ifdef IEM_WITH_SETJMP
7586/**
7587 * Stores a data dqword, longjmp on error.
7588 *
7589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7590 * @param iSegReg The index of the segment register to use for
7591 * this access. The base and limits are checked.
7592 * @param GCPtrMem The address of the guest memory.
7593 * @param u128Value The value to store.
7594 */
7595void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7596{
7597 /* The lazy approach for now... */
7598 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7599 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7600 pu128Dst->au64[0] = u128Value.au64[0];
7601 pu128Dst->au64[1] = u128Value.au64[1];
7602 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7603}
7604#endif
7605
7606
7607/**
7608 * Stores a data dqword, SSE aligned.
7609 *
7610 * @returns Strict VBox status code.
7611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7612 * @param iSegReg The index of the segment register to use for
7613 * this access. The base and limits are checked.
7614 * @param GCPtrMem The address of the guest memory.
7615 * @param u128Value The value to store.
7616 */
7617VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7618{
7619 /* The lazy approach for now... */
7620 PRTUINT128U pu128Dst;
7621 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7622 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7623 if (rc == VINF_SUCCESS)
7624 {
7625 pu128Dst->au64[0] = u128Value.au64[0];
7626 pu128Dst->au64[1] = u128Value.au64[1];
7627 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7628 }
7629 return rc;
7630}
7631
7632
7633#ifdef IEM_WITH_SETJMP
7634/**
7635 * Stores a data dqword, SSE aligned, longjmp on error.
7636 *
7638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7639 * @param iSegReg The index of the segment register to use for
7640 * this access. The base and limits are checked.
7641 * @param GCPtrMem The address of the guest memory.
7642 * @param u128Value The value to store.
7643 */
7644void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7645{
7646 /* The lazy approach for now... */
7647 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7648 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7649 pu128Dst->au64[0] = u128Value.au64[0];
7650 pu128Dst->au64[1] = u128Value.au64[1];
7651 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7652}
7653#endif
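
/*
 * Illustrative sketch only (not built): what separates the two 128-bit store
 * variants above.  The hypothetical helper picks the alignment-checking store
 * for a MOVDQA-style access (#GP(0) on a misaligned address) and the relaxed
 * store for a MOVDQU-style access.
 */
#if 0
static VBOXSTRICTRC iemExampleStoreXmm(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                       RTUINT128U u128Value, bool fAlignmentChecked)
{
    if (fAlignmentChecked)
        return iemMemStoreDataU128AlignedSse(pVCpu, iSegReg, GCPtrMem, u128Value); /* 16-byte alignment enforced. */
    return iemMemStoreDataU128(pVCpu, iSegReg, GCPtrMem, u128Value);               /* No alignment restriction.   */
}
#endif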
7654
7655
7656/**
7657 * Stores a data qqword (256 bits).
7658 *
7659 * @returns Strict VBox status code.
7660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7661 * @param iSegReg The index of the segment register to use for
7662 * this access. The base and limits are checked.
7663 * @param GCPtrMem The address of the guest memory.
7664 * @param pu256Value Pointer to the value to store.
7665 */
7666VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7667{
7668 /* The lazy approach for now... */
7669 PRTUINT256U pu256Dst;
7670 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7671 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7672 if (rc == VINF_SUCCESS)
7673 {
7674 pu256Dst->au64[0] = pu256Value->au64[0];
7675 pu256Dst->au64[1] = pu256Value->au64[1];
7676 pu256Dst->au64[2] = pu256Value->au64[2];
7677 pu256Dst->au64[3] = pu256Value->au64[3];
7678 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7679 }
7680 return rc;
7681}
7682
7683
7684#ifdef IEM_WITH_SETJMP
7685/**
7686 * Stores a data qqword (256 bits), longjmp on error.
7687 *
7688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7689 * @param iSegReg The index of the segment register to use for
7690 * this access. The base and limits are checked.
7691 * @param GCPtrMem The address of the guest memory.
7692 * @param pu256Value Pointer to the value to store.
7693 */
7694void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7695{
7696 /* The lazy approach for now... */
7697 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7698 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7699 pu256Dst->au64[0] = pu256Value->au64[0];
7700 pu256Dst->au64[1] = pu256Value->au64[1];
7701 pu256Dst->au64[2] = pu256Value->au64[2];
7702 pu256Dst->au64[3] = pu256Value->au64[3];
7703 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7704}
7705#endif
7706
7707
7708/**
7709 * Stores a data qqword (256 bits), AVX \#GP(0) aligned.
7710 *
7711 * @returns Strict VBox status code.
7712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7713 * @param iSegReg The index of the segment register to use for
7714 * this access. The base and limits are checked.
7715 * @param GCPtrMem The address of the guest memory.
7716 * @param pu256Value Pointer to the value to store.
7717 */
7718VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7719{
7720 /* The lazy approach for now... */
7721 PRTUINT256U pu256Dst;
7722 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7723 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7724 if (rc == VINF_SUCCESS)
7725 {
7726 pu256Dst->au64[0] = pu256Value->au64[0];
7727 pu256Dst->au64[1] = pu256Value->au64[1];
7728 pu256Dst->au64[2] = pu256Value->au64[2];
7729 pu256Dst->au64[3] = pu256Value->au64[3];
7730 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7731 }
7732 return rc;
7733}
7734
7735
7736#ifdef IEM_WITH_SETJMP
7737/**
7738 * Stores a data qqword (256 bits), AVX \#GP(0) aligned, longjmp on error.
7739 *
7741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7742 * @param iSegReg The index of the segment register to use for
7743 * this access. The base and limits are checked.
7744 * @param GCPtrMem The address of the guest memory.
7745 * @param pu256Value Pointer to the value to store.
7746 */
7747void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7748{
7749 /* The lazy approach for now... */
7750 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7751 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7752 pu256Dst->au64[0] = pu256Value->au64[0];
7753 pu256Dst->au64[1] = pu256Value->au64[1];
7754 pu256Dst->au64[2] = pu256Value->au64[2];
7755 pu256Dst->au64[3] = pu256Value->au64[3];
7756 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7757}
7758#endif
7759
7760
7761/**
7762 * Stores a descriptor register (sgdt, sidt).
7763 *
7764 * @returns Strict VBox status code.
7765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7766 * @param cbLimit The limit.
7767 * @param GCPtrBase The base address.
7768 * @param iSegReg The index of the segment register to use for
7769 * this access. The base and limits are checked.
7770 * @param GCPtrMem The address of the guest memory.
7771 */
7772VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7773{
7774 /*
7775     * The SIDT and SGDT instructions actually store the data using two
7776 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7777     * do not respond to opsize prefixes.
7778 */
7779 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7780 if (rcStrict == VINF_SUCCESS)
7781 {
7782 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7783 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7784 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7785 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7786 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7787 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7788 else
7789 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7790 }
7791 return rcStrict;
7792}
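
/*
 * Illustrative sketch only (not built): an SGDT-style use of the helper above,
 * storing the current GDTR limit and base.  The iemExample* name is
 * hypothetical; a real implementation also deals with VM-exits and the like.
 */
#if 0
static VBOXSTRICTRC iemExampleStoreGdtr(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* Make sure the GDTR has been imported into the guest context first. */
    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
    return iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt,
                               iSegReg, GCPtrMem);
}
#endif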
7793
7794
7795/**
7796 * Pushes a word onto the stack.
7797 *
7798 * @returns Strict VBox status code.
7799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7800 * @param u16Value The value to push.
7801 */
7802VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7803{
7804    /* Decrement the stack pointer. */
7805 uint64_t uNewRsp;
7806 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7807
7808 /* Write the word the lazy way. */
7809 uint16_t *pu16Dst;
7810 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7811 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7812 if (rc == VINF_SUCCESS)
7813 {
7814 *pu16Dst = u16Value;
7815 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7816 }
7817
7818    /* Commit the new RSP value unless an access handler made trouble. */
7819 if (rc == VINF_SUCCESS)
7820 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7821
7822 return rc;
7823}
7824
7825
7826/**
7827 * Pushes a dword onto the stack.
7828 *
7829 * @returns Strict VBox status code.
7830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7831 * @param u32Value The value to push.
7832 */
7833VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7834{
7835    /* Decrement the stack pointer. */
7836 uint64_t uNewRsp;
7837 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7838
7839 /* Write the dword the lazy way. */
7840 uint32_t *pu32Dst;
7841 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
7842 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
7843 if (rc == VINF_SUCCESS)
7844 {
7845 *pu32Dst = u32Value;
7846 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7847 }
7848
7849    /* Commit the new RSP value unless an access handler made trouble. */
7850 if (rc == VINF_SUCCESS)
7851 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7852
7853 return rc;
7854}
7855
7856
7857/**
7858 * Pushes a dword segment register value onto the stack.
7859 *
7860 * @returns Strict VBox status code.
7861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7862 * @param u32Value The value to push.
7863 */
7864VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7865{
7866    /* Decrement the stack pointer. */
7867 uint64_t uNewRsp;
7868 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7869
7870    /* The Intel docs talk about zero extending the selector register
7871       value. My actual Intel CPU here might be zero extending the value
7872 but it still only writes the lower word... */
7873 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7874     * happens when crossing a page boundary: is the high word checked
7875 * for write accessibility or not? Probably it is. What about segment limits?
7876 * It appears this behavior is also shared with trap error codes.
7877 *
7878     * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
7879     * on ancient hardware when it actually did change. */
7880 uint16_t *pu16Dst;
7881 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
7882 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
7883 if (rc == VINF_SUCCESS)
7884 {
7885 *pu16Dst = (uint16_t)u32Value;
7886 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7887 }
7888
7889    /* Commit the new RSP value unless an access handler made trouble. */
7890 if (rc == VINF_SUCCESS)
7891 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7892
7893 return rc;
7894}
7895
7896
7897/**
7898 * Pushes a qword onto the stack.
7899 *
7900 * @returns Strict VBox status code.
7901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7902 * @param u64Value The value to push.
7903 */
7904VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7905{
7906    /* Decrement the stack pointer. */
7907 uint64_t uNewRsp;
7908 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7909
7910    /* Write the qword the lazy way. */
7911 uint64_t *pu64Dst;
7912 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
7913 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
7914 if (rc == VINF_SUCCESS)
7915 {
7916 *pu64Dst = u64Value;
7917 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7918 }
7919
7920    /* Commit the new RSP value unless an access handler made trouble. */
7921 if (rc == VINF_SUCCESS)
7922 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7923
7924 return rc;
7925}
7926
7927
7928/**
7929 * Pops a word from the stack.
7930 *
7931 * @returns Strict VBox status code.
7932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7933 * @param pu16Value Where to store the popped value.
7934 */
7935VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7936{
7937 /* Increment the stack pointer. */
7938 uint64_t uNewRsp;
7939 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7940
7941    /* Read the word the lazy way. */
7942 uint16_t const *pu16Src;
7943 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
7944 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
7945 if (rc == VINF_SUCCESS)
7946 {
7947 *pu16Value = *pu16Src;
7948 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7949
7950 /* Commit the new RSP value. */
7951 if (rc == VINF_SUCCESS)
7952 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7953 }
7954
7955 return rc;
7956}
7957
7958
7959/**
7960 * Pops a dword from the stack.
7961 *
7962 * @returns Strict VBox status code.
7963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7964 * @param pu32Value Where to store the popped value.
7965 */
7966VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
7967{
7968 /* Increment the stack pointer. */
7969 uint64_t uNewRsp;
7970 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
7971
7972    /* Read the dword the lazy way. */
7973 uint32_t const *pu32Src;
7974 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
7975 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
7976 if (rc == VINF_SUCCESS)
7977 {
7978 *pu32Value = *pu32Src;
7979 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7980
7981 /* Commit the new RSP value. */
7982 if (rc == VINF_SUCCESS)
7983 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7984 }
7985
7986 return rc;
7987}
7988
7989
7990/**
7991 * Pops a qword from the stack.
7992 *
7993 * @returns Strict VBox status code.
7994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7995 * @param pu64Value Where to store the popped value.
7996 */
7997VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
7998{
7999 /* Increment the stack pointer. */
8000 uint64_t uNewRsp;
8001 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8002
8003    /* Read the qword the lazy way. */
8004 uint64_t const *pu64Src;
8005 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8006 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8007 if (rc == VINF_SUCCESS)
8008 {
8009 *pu64Value = *pu64Src;
8010 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8011
8012 /* Commit the new RSP value. */
8013 if (rc == VINF_SUCCESS)
8014 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8015 }
8016
8017 return rc;
8018}
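
/*
 * Illustrative sketch only (not built): a push/pop round trip with the helpers
 * above.  Both helpers only commit the new RSP once the memory access has
 * succeeded, so a #PF/#SS in the middle leaves the guest stack pointer as-is.
 */
#if 0
static VBOXSTRICTRC iemExampleStackRoundTripU32(PVMCPUCC pVCpu, uint32_t u32Value, uint32_t *pu32Popped)
{
    VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, u32Value);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU32(pVCpu, pu32Popped);
    return rcStrict;
}
#endif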
8019
8020
8021/**
8022 * Pushes a word onto the stack, using a temporary stack pointer.
8023 *
8024 * @returns Strict VBox status code.
8025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8026 * @param u16Value The value to push.
8027 * @param pTmpRsp Pointer to the temporary stack pointer.
8028 */
8029VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8030{
8031    /* Decrement the stack pointer. */
8032 RTUINT64U NewRsp = *pTmpRsp;
8033 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8034
8035 /* Write the word the lazy way. */
8036 uint16_t *pu16Dst;
8037 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8038 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8039 if (rc == VINF_SUCCESS)
8040 {
8041 *pu16Dst = u16Value;
8042 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8043 }
8044
8045    /* Commit the new RSP value unless an access handler made trouble. */
8046 if (rc == VINF_SUCCESS)
8047 *pTmpRsp = NewRsp;
8048
8049 return rc;
8050}
8051
8052
8053/**
8054 * Pushes a dword onto the stack, using a temporary stack pointer.
8055 *
8056 * @returns Strict VBox status code.
8057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8058 * @param u32Value The value to push.
8059 * @param pTmpRsp Pointer to the temporary stack pointer.
8060 */
8061VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8062{
8063    /* Decrement the stack pointer. */
8064 RTUINT64U NewRsp = *pTmpRsp;
8065 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8066
8067    /* Write the dword the lazy way. */
8068 uint32_t *pu32Dst;
8069 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8070 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8071 if (rc == VINF_SUCCESS)
8072 {
8073 *pu32Dst = u32Value;
8074 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8075 }
8076
8077    /* Commit the new RSP value unless an access handler made trouble. */
8078 if (rc == VINF_SUCCESS)
8079 *pTmpRsp = NewRsp;
8080
8081 return rc;
8082}
8083
8084
8085/**
8086 * Pushes a qword onto the stack, using a temporary stack pointer.
8087 *
8088 * @returns Strict VBox status code.
8089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8090 * @param u64Value The value to push.
8091 * @param pTmpRsp Pointer to the temporary stack pointer.
8092 */
8093VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8094{
8095    /* Decrement the stack pointer. */
8096 RTUINT64U NewRsp = *pTmpRsp;
8097 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8098
8099    /* Write the qword the lazy way. */
8100 uint64_t *pu64Dst;
8101 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8102 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8103 if (rc == VINF_SUCCESS)
8104 {
8105 *pu64Dst = u64Value;
8106 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8107 }
8108
8109    /* Commit the new RSP value unless an access handler made trouble. */
8110 if (rc == VINF_SUCCESS)
8111 *pTmpRsp = NewRsp;
8112
8113 return rc;
8114}
8115
8116
8117/**
8118 * Pops a word from the stack, using a temporary stack pointer.
8119 *
8120 * @returns Strict VBox status code.
8121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8122 * @param pu16Value Where to store the popped value.
8123 * @param pTmpRsp Pointer to the temporary stack pointer.
8124 */
8125VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8126{
8127 /* Increment the stack pointer. */
8128 RTUINT64U NewRsp = *pTmpRsp;
8129 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8130
8131    /* Read the word the lazy way. */
8132 uint16_t const *pu16Src;
8133 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8134 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8135 if (rc == VINF_SUCCESS)
8136 {
8137 *pu16Value = *pu16Src;
8138 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8139
8140 /* Commit the new RSP value. */
8141 if (rc == VINF_SUCCESS)
8142 *pTmpRsp = NewRsp;
8143 }
8144
8145 return rc;
8146}
8147
8148
8149/**
8150 * Pops a dword from the stack, using a temporary stack pointer.
8151 *
8152 * @returns Strict VBox status code.
8153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8154 * @param pu32Value Where to store the popped value.
8155 * @param pTmpRsp Pointer to the temporary stack pointer.
8156 */
8157VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8158{
8159 /* Increment the stack pointer. */
8160 RTUINT64U NewRsp = *pTmpRsp;
8161 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8162
8163    /* Read the dword the lazy way. */
8164 uint32_t const *pu32Src;
8165 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8166 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8167 if (rc == VINF_SUCCESS)
8168 {
8169 *pu32Value = *pu32Src;
8170 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8171
8172 /* Commit the new RSP value. */
8173 if (rc == VINF_SUCCESS)
8174 *pTmpRsp = NewRsp;
8175 }
8176
8177 return rc;
8178}
8179
8180
8181/**
8182 * Pops a qword from the stack, using a temporary stack pointer.
8183 *
8184 * @returns Strict VBox status code.
8185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8186 * @param pu64Value Where to store the popped value.
8187 * @param pTmpRsp Pointer to the temporary stack pointer.
8188 */
8189VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8190{
8191 /* Increment the stack pointer. */
8192 RTUINT64U NewRsp = *pTmpRsp;
8193 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8194
8195    /* Read the qword the lazy way. */
8196 uint64_t const *pu64Src;
8197 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8198 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8199 if (rcStrict == VINF_SUCCESS)
8200 {
8201 *pu64Value = *pu64Src;
8202 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8203
8204 /* Commit the new RSP value. */
8205 if (rcStrict == VINF_SUCCESS)
8206 *pTmpRsp = NewRsp;
8207 }
8208
8209 return rcStrict;
8210}
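
/*
 * Illustrative sketch only (not built): the point of the Ex variants above is
 * that several pushes/pops can be staged against a caller-owned temporary RSP
 * and the architectural RSP committed once at the end, as in this hypothetical
 * two-word pop.
 */
#if 0
static VBOXSTRICTRC iemExamplePopTwoWords(PVMCPUCC pVCpu, uint16_t *pu16First, uint16_t *pu16Second)
{
    RTUINT64U TmpRsp;
    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;

    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, pu16First, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16Ex(pVCpu, pu16Second, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = TmpRsp.u; /* Commit only when both pops succeeded. */
    return rcStrict;
}
#endif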
8211
8212
8213/**
8214 * Begin a special stack push (used by interrupts, exceptions and such).
8215 *
8216 * This will raise \#SS or \#PF if appropriate.
8217 *
8218 * @returns Strict VBox status code.
8219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8220 * @param cbMem The number of bytes to push onto the stack.
8221 * @param cbAlign The alignment mask (7, 3, 1).
8222 * @param ppvMem Where to return the pointer to the stack memory.
8223 * As with the other memory functions this could be
8224 * direct access or bounce buffered access, so
8225 *                      don't commit the register until the commit call
8226 * succeeds.
8227 * @param puNewRsp Where to return the new RSP value. This must be
8228 * passed unchanged to
8229 * iemMemStackPushCommitSpecial().
8230 */
8231VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8232 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8233{
8234 Assert(cbMem < UINT8_MAX);
8235 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8236 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8237 IEM_ACCESS_STACK_W, cbAlign);
8238}
8239
8240
8241/**
8242 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8243 *
8244 * This will update the rSP.
8245 *
8246 * @returns Strict VBox status code.
8247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8248 * @param pvMem The pointer returned by
8249 * iemMemStackPushBeginSpecial().
8250 * @param uNewRsp The new RSP value returned by
8251 * iemMemStackPushBeginSpecial().
8252 */
8253VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8254{
8255 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8256 if (rcStrict == VINF_SUCCESS)
8257 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8258 return rcStrict;
8259}
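
/*
 * Illustrative sketch only (not built): the begin/commit pattern above, here
 * used to push a hypothetical three word frame (the flags/selector/IP order
 * is just an example).  Neither the data nor the RSP is committed if any of
 * the steps fail.
 */
#if 0
static VBOXSTRICTRC iemExamplePushThreeWords(PVMCPUCC pVCpu, uint16_t uFlags, uint16_t uSel, uint16_t uIp)
{
    void    *pvFrame;
    uint64_t uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6 /*cbMem*/, 1 /*cbAlign mask*/, &pvFrame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        uint16_t *pu16Frame = (uint16_t *)pvFrame;
        pu16Frame[2] = uFlags;
        pu16Frame[1] = uSel;
        pu16Frame[0] = uIp;
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp);
    }
    return rcStrict;
}
#endif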
8260
8261
8262/**
8263 * Begin a special stack pop (used by iret, retf and such).
8264 *
8265 * This will raise \#SS or \#PF if appropriate.
8266 *
8267 * @returns Strict VBox status code.
8268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8269 * @param cbMem The number of bytes to pop from the stack.
8270 * @param cbAlign The alignment mask (7, 3, 1).
8271 * @param ppvMem Where to return the pointer to the stack memory.
8272 * @param puNewRsp Where to return the new RSP value. This must be
8273 * assigned to CPUMCTX::rsp manually some time
8274 * after iemMemStackPopDoneSpecial() has been
8275 * called.
8276 */
8277VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8278 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8279{
8280 Assert(cbMem < UINT8_MAX);
8281 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8282 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8283}
8284
8285
8286/**
8287 * Continue a special stack pop (used by iret and retf), for the purpose of
8288 * retrieving a new stack pointer.
8289 *
8290 * This will raise \#SS or \#PF if appropriate.
8291 *
8292 * @returns Strict VBox status code.
8293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8294 * @param off Offset from the top of the stack. This is zero
8295 * except in the retf case.
8296 * @param cbMem The number of bytes to pop from the stack.
8297 * @param ppvMem Where to return the pointer to the stack memory.
8298 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8299 * return this because all use of this function is
8300 * to retrieve a new value and anything we return
8301 * here would be discarded.)
8302 */
8303VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8304 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8305{
8306 Assert(cbMem < UINT8_MAX);
8307
8308    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8309 RTGCPTR GCPtrTop;
8310 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8311 GCPtrTop = uCurNewRsp;
8312 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8313 GCPtrTop = (uint32_t)uCurNewRsp;
8314 else
8315 GCPtrTop = (uint16_t)uCurNewRsp;
8316
8317 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8318 0 /* checked in iemMemStackPopBeginSpecial */);
8319}
8320
8321
8322/**
8323 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8324 * iemMemStackPopContinueSpecial).
8325 *
8326 * The caller will manually commit the rSP.
8327 *
8328 * @returns Strict VBox status code.
8329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8330 * @param pvMem The pointer returned by
8331 * iemMemStackPopBeginSpecial() or
8332 * iemMemStackPopContinueSpecial().
8333 */
8334VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8335{
8336 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8337}
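
/*
 * Illustrative sketch only (not built): the special pop pattern.  Map the top
 * of the stack, copy out the values, release the mapping with the Done call,
 * and only then assign the new RSP manually, as the documentation above
 * requires.  The two-word IP/CS layout is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExamplePopIpCs(PVMCPUCC pVCpu, uint16_t *pu16Ip, uint16_t *pu16Cs)
{
    void const *pvFrame;
    uint64_t    uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 4 /*cbMem*/, 1 /*cbAlign mask*/, &pvFrame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        uint16_t const *pu16Frame = (uint16_t const *)pvFrame;
        *pu16Ip = pu16Frame[0];
        *pu16Cs = pu16Frame[1];
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
        if (rcStrict == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp; /* Commit RSP manually after the Done call. */
    }
    return rcStrict;
}
#endif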
8338
8339
8340/**
8341 * Fetches a system table byte.
8342 *
8343 * @returns Strict VBox status code.
8344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8345 * @param pbDst Where to return the byte.
8346 * @param iSegReg The index of the segment register to use for
8347 * this access. The base and limits are checked.
8348 * @param GCPtrMem The address of the guest memory.
8349 */
8350VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8351{
8352 /* The lazy approach for now... */
8353 uint8_t const *pbSrc;
8354 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8355 if (rc == VINF_SUCCESS)
8356 {
8357 *pbDst = *pbSrc;
8358 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8359 }
8360 return rc;
8361}
8362
8363
8364/**
8365 * Fetches a system table word.
8366 *
8367 * @returns Strict VBox status code.
8368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8369 * @param pu16Dst Where to return the word.
8370 * @param iSegReg The index of the segment register to use for
8371 * this access. The base and limits are checked.
8372 * @param GCPtrMem The address of the guest memory.
8373 */
8374VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8375{
8376 /* The lazy approach for now... */
8377 uint16_t const *pu16Src;
8378 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8379 if (rc == VINF_SUCCESS)
8380 {
8381 *pu16Dst = *pu16Src;
8382 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8383 }
8384 return rc;
8385}
8386
8387
8388/**
8389 * Fetches a system table dword.
8390 *
8391 * @returns Strict VBox status code.
8392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8393 * @param pu32Dst Where to return the dword.
8394 * @param iSegReg The index of the segment register to use for
8395 * this access. The base and limits are checked.
8396 * @param GCPtrMem The address of the guest memory.
8397 */
8398VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8399{
8400 /* The lazy approach for now... */
8401 uint32_t const *pu32Src;
8402 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8403 if (rc == VINF_SUCCESS)
8404 {
8405 *pu32Dst = *pu32Src;
8406 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8407 }
8408 return rc;
8409}
8410
8411
8412/**
8413 * Fetches a system table qword.
8414 *
8415 * @returns Strict VBox status code.
8416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8417 * @param pu64Dst Where to return the qword.
8418 * @param iSegReg The index of the segment register to use for
8419 * this access. The base and limits are checked.
8420 * @param GCPtrMem The address of the guest memory.
8421 */
8422VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8423{
8424 /* The lazy approach for now... */
8425 uint64_t const *pu64Src;
8426 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8427 if (rc == VINF_SUCCESS)
8428 {
8429 *pu64Dst = *pu64Src;
8430 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8431 }
8432 return rc;
8433}
8434
8435
8436/**
8437 * Fetches a descriptor table entry with caller specified error code.
8438 *
8439 * @returns Strict VBox status code.
8440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8441 * @param pDesc Where to return the descriptor table entry.
8442 * @param uSel The selector which table entry to fetch.
8443 * @param uXcpt The exception to raise on table lookup error.
8444 * @param uErrorCode The error code associated with the exception.
8445 */
8446static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8447 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8448{
8449 AssertPtr(pDesc);
8450 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8451
8452 /** @todo did the 286 require all 8 bytes to be accessible? */
8453 /*
8454 * Get the selector table base and check bounds.
8455 */
8456 RTGCPTR GCPtrBase;
8457 if (uSel & X86_SEL_LDT)
8458 {
8459 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8460 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8461 {
8462 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8463 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8464 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8465 uErrorCode, 0);
8466 }
8467
8468 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8469 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8470 }
8471 else
8472 {
8473 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8474 {
8475 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8476 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8477 uErrorCode, 0);
8478 }
8479 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8480 }
8481
8482 /*
8483 * Read the legacy descriptor and maybe the long mode extensions if
8484 * required.
8485 */
8486 VBOXSTRICTRC rcStrict;
8487 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8488 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8489 else
8490 {
8491 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8492 if (rcStrict == VINF_SUCCESS)
8493 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8494 if (rcStrict == VINF_SUCCESS)
8495 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8496 if (rcStrict == VINF_SUCCESS)
8497 pDesc->Legacy.au16[3] = 0;
8498 else
8499 return rcStrict;
8500 }
8501
8502 if (rcStrict == VINF_SUCCESS)
8503 {
8504 if ( !IEM_IS_LONG_MODE(pVCpu)
8505 || pDesc->Legacy.Gen.u1DescType)
8506 pDesc->Long.au64[1] = 0;
8507 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8508 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8509 else
8510 {
8511 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8512 /** @todo is this the right exception? */
8513 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8514 }
8515 }
8516 return rcStrict;
8517}
8518
8519
8520/**
8521 * Fetches a descriptor table entry.
8522 *
8523 * @returns Strict VBox status code.
8524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8525 * @param pDesc Where to return the descriptor table entry.
8526 * @param uSel The selector which table entry to fetch.
8527 * @param uXcpt The exception to raise on table lookup error.
8528 */
8529VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8530{
8531 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8532}
8533
8534
8535/**
8536 * Marks the selector descriptor as accessed (only non-system descriptors).
8537 *
8538 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8539 * will therefore skip the limit checks.
8540 *
8541 * @returns Strict VBox status code.
8542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8543 * @param uSel The selector.
8544 */
8545VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8546{
8547 /*
8548 * Get the selector table base and calculate the entry address.
8549 */
8550 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8551 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8552 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8553 GCPtr += uSel & X86_SEL_MASK;
8554
8555 /*
8556 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8557 * ugly stuff to avoid this. This will make sure it's an atomic access
8558     * as well as more or less remove any question about 8-bit or 32-bit accesses.
8559 */
8560 VBOXSTRICTRC rcStrict;
8561 uint32_t volatile *pu32;
8562 if ((GCPtr & 3) == 0)
8563 {
8564        /* The normal case: map the 32 bits around the accessed bit (bit 40). */
8565 GCPtr += 2 + 2;
8566 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8567 if (rcStrict != VINF_SUCCESS)
8568 return rcStrict;
8569        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8570 }
8571 else
8572 {
8573 /* The misaligned GDT/LDT case, map the whole thing. */
8574 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8575 if (rcStrict != VINF_SUCCESS)
8576 return rcStrict;
8577 switch ((uintptr_t)pu32 & 3)
8578 {
8579 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8580 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8581 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8582 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8583 }
8584 }
8585
8586 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8587}
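
/*
 * Illustrative sketch only (not built): a heavily simplified outline of how
 * iemMemFetchSelDesc() and iemMemMarkSelDescAccessed() cooperate when a data
 * segment is loaded.  Real segment loading performs many more checks
 * (DPL/RPL/CPL, limits, exact segment types) and raises the proper exceptions;
 * the error constant below is just a placeholder.
 */
#if 0
static VBOXSTRICTRC iemExampleTouchDataSegDesc(PVMCPUCC pVCpu, uint16_t uSel)
{
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    if (   !Desc.Legacy.Gen.u1Present   /* not present: a real loader raises #NP here */
        || !Desc.Legacy.Gen.u1DescType) /* system descriptor: not a code/data segment */
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* placeholder for the appropriate exception */

    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
    return rcStrict;
}
#endif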
8588
8589/** @} */
8590
8591/** @name Opcode Helpers.
8592 * @{
8593 */
8594
8595/**
8596 * Calculates the effective address of a ModR/M memory operand.
8597 *
8598 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8599 *
8600 * @return Strict VBox status code.
8601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8602 * @param bRm The ModRM byte.
8603 * @param cbImm The size of any immediate following the
8604 * effective address opcode bytes. Important for
8605 * RIP relative addressing.
8606 * @param pGCPtrEff Where to return the effective address.
8607 */
8608VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8609{
8610 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8611# define SET_SS_DEF() \
8612 do \
8613 { \
8614 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8615 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8616 } while (0)
8617
8618 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8619 {
8620/** @todo Check the effective address size crap! */
8621 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8622 {
8623 uint16_t u16EffAddr;
8624
8625 /* Handle the disp16 form with no registers first. */
8626 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8627 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8628 else
8629 {
8630            /* Get the displacement. */
8631 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8632 {
8633 case 0: u16EffAddr = 0; break;
8634 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8635 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8636 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8637 }
8638
8639 /* Add the base and index registers to the disp. */
8640 switch (bRm & X86_MODRM_RM_MASK)
8641 {
8642 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8643 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8644 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8645 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8646 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8647 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8648 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8649 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8650 }
8651 }
8652
8653 *pGCPtrEff = u16EffAddr;
8654 }
8655 else
8656 {
8657 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8658 uint32_t u32EffAddr;
8659
8660 /* Handle the disp32 form with no registers first. */
8661 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8662 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8663 else
8664 {
8665 /* Get the register (or SIB) value. */
8666 switch ((bRm & X86_MODRM_RM_MASK))
8667 {
8668 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8669 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8670 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8671 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8672 case 4: /* SIB */
8673 {
8674 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8675
8676 /* Get the index and scale it. */
8677 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8678 {
8679 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8680 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8681 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8682 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8683 case 4: u32EffAddr = 0; /*none */ break;
8684 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8685 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8686 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8687 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8688 }
8689 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8690
8691 /* add base */
8692 switch (bSib & X86_SIB_BASE_MASK)
8693 {
8694 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8695 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8696 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8697 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8698 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8699 case 5:
8700 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8701 {
8702 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8703 SET_SS_DEF();
8704 }
8705 else
8706 {
8707 uint32_t u32Disp;
8708 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8709 u32EffAddr += u32Disp;
8710 }
8711 break;
8712 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8713 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8714 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8715 }
8716 break;
8717 }
8718 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8719 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8720 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8721 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8722 }
8723
8724 /* Get and add the displacement. */
8725 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8726 {
8727 case 0:
8728 break;
8729 case 1:
8730 {
8731 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8732 u32EffAddr += i8Disp;
8733 break;
8734 }
8735 case 2:
8736 {
8737 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8738 u32EffAddr += u32Disp;
8739 break;
8740 }
8741 default:
8742 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8743 }
8744
8745 }
8746 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8747 *pGCPtrEff = u32EffAddr;
8748 else
8749 {
8750 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8751 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8752 }
8753 }
8754 }
8755 else
8756 {
8757 uint64_t u64EffAddr;
8758
8759 /* Handle the rip+disp32 form with no registers first. */
8760 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8761 {
8762 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8763 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8764 }
8765 else
8766 {
8767 /* Get the register (or SIB) value. */
8768 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8769 {
8770 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8771 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8772 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8773 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8774 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8775 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8776 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8777 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8778 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8779 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8780 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8781 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8782 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8783 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8784 /* SIB */
8785 case 4:
8786 case 12:
8787 {
8788 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8789
8790 /* Get the index and scale it. */
8791 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8792 {
8793 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8794 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8795 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8796 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8797 case 4: u64EffAddr = 0; /*none */ break;
8798 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8799 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8800 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8801 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8802 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8803 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8804 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8805 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8806 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8807 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8808 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8809 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8810 }
8811 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8812
8813 /* add base */
8814 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8815 {
8816 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8817 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8818 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8819 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8820 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8821 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8822 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8823 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8824 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8825 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8826 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8827 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8828 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8829 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8830 /* complicated encodings */
8831 case 5:
8832 case 13:
8833 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8834 {
8835 if (!pVCpu->iem.s.uRexB)
8836 {
8837 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8838 SET_SS_DEF();
8839 }
8840 else
8841 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8842 }
8843 else
8844 {
8845 uint32_t u32Disp;
8846 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8847 u64EffAddr += (int32_t)u32Disp;
8848 }
8849 break;
8850 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8851 }
8852 break;
8853 }
8854 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8855 }
8856
8857 /* Get and add the displacement. */
8858 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8859 {
8860 case 0:
8861 break;
8862 case 1:
8863 {
8864 int8_t i8Disp;
8865 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8866 u64EffAddr += i8Disp;
8867 break;
8868 }
8869 case 2:
8870 {
8871 uint32_t u32Disp;
8872 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8873 u64EffAddr += (int32_t)u32Disp;
8874 break;
8875 }
8876 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8877 }
8878
8879 }
8880
8881 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8882 *pGCPtrEff = u64EffAddr;
8883 else
8884 {
8885 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8886 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8887 }
8888 }
8889
8890 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8891 return VINF_SUCCESS;
8892}
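
/*
 * Illustrative sketch only (not built): resolving a ModR/M memory operand with
 * the helper above and then fetching a word through the effective segment.
 * Actual instruction bodies go through the IEM_MC_* macros instead of calling
 * these helpers directly; the iemExample* name is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleFetchWordFromModRm(PVMCPUCC pVCpu, uint8_t bRm, uint16_t *pu16Value)
{
    RTGCPTR GCPtrEff;
    VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0 /*cbImm*/, &GCPtrEff);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemFetchDataU16(pVCpu, pu16Value, pVCpu->iem.s.iEffSeg, GCPtrEff);
    return rcStrict;
}
#endif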
8893
8894
8895/**
8896 * Calculates the effective address of a ModR/M memory operand.
8897 *
8898 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8899 *
8900 * @return Strict VBox status code.
8901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8902 * @param bRm The ModRM byte.
8903 * @param cbImm The size of any immediate following the
8904 * effective address opcode bytes. Important for
8905 * RIP relative addressing.
8906 * @param pGCPtrEff Where to return the effective address.
8907 * @param offRsp RSP displacement.
8908 */
8909VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8910{
8911    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8912# define SET_SS_DEF() \
8913 do \
8914 { \
8915 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8916 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8917 } while (0)
8918
8919 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8920 {
8921/** @todo Check the effective address size crap! */
8922 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8923 {
8924 uint16_t u16EffAddr;
8925
8926 /* Handle the disp16 form with no registers first. */
8927 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8928 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8929 else
8930 {
8931            /* Get the displacement. */
8932 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8933 {
8934 case 0: u16EffAddr = 0; break;
8935 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8936 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8937 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8938 }
8939
8940 /* Add the base and index registers to the disp. */
8941 switch (bRm & X86_MODRM_RM_MASK)
8942 {
8943 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8944 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8945 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8946 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8947 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8948 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8949 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8950 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8951 }
8952 }
8953
8954 *pGCPtrEff = u16EffAddr;
8955 }
8956 else
8957 {
8958 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8959 uint32_t u32EffAddr;
8960
8961 /* Handle the disp32 form with no registers first. */
8962 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8963 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8964 else
8965 {
8966 /* Get the register (or SIB) value. */
8967 switch ((bRm & X86_MODRM_RM_MASK))
8968 {
8969 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8970 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8971 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8972 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8973 case 4: /* SIB */
8974 {
8975 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8976
8977 /* Get the index and scale it. */
8978 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8979 {
8980 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8981 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8982 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8983 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8984 case 4: u32EffAddr = 0; /*none */ break;
8985 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8986 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8987 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8988 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8989 }
8990 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8991
8992 /* add base */
8993 switch (bSib & X86_SIB_BASE_MASK)
8994 {
8995 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8996 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8997 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8998 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8999 case 4:
9000 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
9001 SET_SS_DEF();
9002 break;
9003 case 5:
9004 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9005 {
9006 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9007 SET_SS_DEF();
9008 }
9009 else
9010 {
9011 uint32_t u32Disp;
9012 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9013 u32EffAddr += u32Disp;
9014 }
9015 break;
9016 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9017 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9018 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9019 }
9020 break;
9021 }
9022 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9023 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9024 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9025 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9026 }
9027
9028 /* Get and add the displacement. */
9029 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9030 {
9031 case 0:
9032 break;
9033 case 1:
9034 {
9035 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9036 u32EffAddr += i8Disp;
9037 break;
9038 }
9039 case 2:
9040 {
9041 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9042 u32EffAddr += u32Disp;
9043 break;
9044 }
9045 default:
9046 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9047 }
9048
9049 }
9050 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9051 *pGCPtrEff = u32EffAddr;
9052 else
9053 {
9054 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9055 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9056 }
9057 }
9058 }
9059 else
9060 {
9061 uint64_t u64EffAddr;
9062
9063 /* Handle the rip+disp32 form with no registers first. */
9064 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9065 {
9066 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9067 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9068 }
9069 else
9070 {
9071 /* Get the register (or SIB) value. */
9072 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9073 {
9074 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9075 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9076 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9077 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9078 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9079 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9080 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9081 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9082 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9083 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9084 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9085 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9086 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9087 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9088 /* SIB */
9089 case 4:
9090 case 12:
9091 {
9092 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9093
9094 /* Get the index and scale it. */
9095 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9096 {
9097 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9098 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9099 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9100 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9101 case 4: u64EffAddr = 0; /*none */ break;
9102 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9103 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9104 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9105 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9106 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9107 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9108 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9109 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9110 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9111 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9112 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9113 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9114 }
9115 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9116
9117 /* add base */
9118 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9119 {
9120 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9121 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9122 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9123 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9124 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
9125 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9126 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9127 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9128 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9129 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9130 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9131 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9132 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9133 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9134 /* complicated encodings */
9135 case 5:
9136 case 13:
9137 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9138 {
9139 if (!pVCpu->iem.s.uRexB)
9140 {
9141 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9142 SET_SS_DEF();
9143 }
9144 else
9145 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9146 }
9147 else
9148 {
9149 uint32_t u32Disp;
9150 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9151 u64EffAddr += (int32_t)u32Disp;
9152 }
9153 break;
9154 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9155 }
9156 break;
9157 }
9158 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9159 }
9160
9161 /* Get and add the displacement. */
9162 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9163 {
9164 case 0:
9165 break;
9166 case 1:
9167 {
9168 int8_t i8Disp;
9169 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9170 u64EffAddr += i8Disp;
9171 break;
9172 }
9173 case 2:
9174 {
9175 uint32_t u32Disp;
9176 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9177 u64EffAddr += (int32_t)u32Disp;
9178 break;
9179 }
9180 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9181 }
9182
9183 }
9184
9185 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9186 *pGCPtrEff = u64EffAddr;
9187 else
9188 {
9189 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9190 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9191 }
9192 }
9193
9194 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
9195 return VINF_SUCCESS;
9196}
9197
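/*
 * Illustrative sketch only (not part of the build): how the ModR/M byte consumed by the
 * effective-address helpers above splits into its fields, plus a worked 16-bit case.  The
 * function name is made up for illustration; the masks and shifts are the X86_MODRM_*
 * constants already used above.
 */
#if 0
static void iemExampleSplitModRm(uint8_t bRm)
{
    uint8_t const iMod = (bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK; /* bits 7:6 - selects the displacement form */
    uint8_t const iRm  = bRm & X86_MODRM_RM_MASK;                            /* bits 2:0 - selects the base/index combination */

    /* Worked example: bRm=0x42 -> mod=1 (a disp8 follows), rm=2.  In 16-bit addressing
       (see the table above) rm=2 means BP+SI, so the effective address is BP + SI + disp8
       and SS becomes the default segment via SET_SS_DEF(). */
    RT_NOREF(iMod, iRm);
}
#endif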
9198
9199#ifdef IEM_WITH_SETJMP
9200/**
9201 * Calculates the effective address of a ModR/M memory operand.
9202 *
9203 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9204 *
9205 * May longjmp on internal error.
9206 *
9207 * @return The effective address.
9208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9209 * @param bRm The ModRM byte.
9210 * @param cbImm The size of any immediate following the
9211 * effective address opcode bytes. Important for
9212 * RIP relative addressing.
9213 */
9214RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) RT_NOEXCEPT
9215{
9216 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9217# define SET_SS_DEF() \
9218 do \
9219 { \
9220 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9221 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9222 } while (0)
9223
9224 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9225 {
9226/** @todo Check the effective address size crap! */
9227 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9228 {
9229 uint16_t u16EffAddr;
9230
9231 /* Handle the disp16 form with no registers first. */
9232 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9233 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9234 else
9235 {
9236 /* Get the displacement. */
9237 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9238 {
9239 case 0: u16EffAddr = 0; break;
9240 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9241 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9242 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
9243 }
9244
9245 /* Add the base and index registers to the disp. */
9246 switch (bRm & X86_MODRM_RM_MASK)
9247 {
9248 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9249 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9250 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9251 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9252 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9253 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9254 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9255 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9256 }
9257 }
9258
9259 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9260 return u16EffAddr;
9261 }
9262
9263 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9264 uint32_t u32EffAddr;
9265
9266 /* Handle the disp32 form with no registers first. */
9267 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9268 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9269 else
9270 {
9271 /* Get the register (or SIB) value. */
9272 switch ((bRm & X86_MODRM_RM_MASK))
9273 {
9274 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9275 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9276 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9277 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9278 case 4: /* SIB */
9279 {
9280 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9281
9282 /* Get the index and scale it. */
9283 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9284 {
9285 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9286 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9287 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9288 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9289 case 4: u32EffAddr = 0; /*none */ break;
9290 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9291 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9292 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9293 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9294 }
9295 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9296
9297 /* add base */
9298 switch (bSib & X86_SIB_BASE_MASK)
9299 {
9300 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9301 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9302 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9303 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9304 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9305 case 5:
9306 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9307 {
9308 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9309 SET_SS_DEF();
9310 }
9311 else
9312 {
9313 uint32_t u32Disp;
9314 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9315 u32EffAddr += u32Disp;
9316 }
9317 break;
9318 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9319 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9320 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9321 }
9322 break;
9323 }
9324 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9325 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9326 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9327 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9328 }
9329
9330 /* Get and add the displacement. */
9331 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9332 {
9333 case 0:
9334 break;
9335 case 1:
9336 {
9337 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9338 u32EffAddr += i8Disp;
9339 break;
9340 }
9341 case 2:
9342 {
9343 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9344 u32EffAddr += u32Disp;
9345 break;
9346 }
9347 default:
9348 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
9349 }
9350 }
9351
9352 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9353 {
9354 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9355 return u32EffAddr;
9356 }
9357 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9358 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9359 return u32EffAddr & UINT16_MAX;
9360 }
9361
9362 uint64_t u64EffAddr;
9363
9364 /* Handle the rip+disp32 form with no registers first. */
9365 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9366 {
9367 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9368 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9369 }
9370 else
9371 {
9372 /* Get the register (or SIB) value. */
9373 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9374 {
9375 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9376 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9377 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9378 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9379 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9380 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9381 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9382 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9383 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9384 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9385 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9386 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9387 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9388 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9389 /* SIB */
9390 case 4:
9391 case 12:
9392 {
9393 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9394
9395 /* Get the index and scale it. */
9396 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9397 {
9398 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9399 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9400 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9401 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9402 case 4: u64EffAddr = 0; /*none */ break;
9403 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9404 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9405 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9406 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9407 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9408 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9409 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9410 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9411 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9412 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9413 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9414 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9415 }
9416 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9417
9418 /* add base */
9419 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9420 {
9421 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9422 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9423 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9424 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9425 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9426 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9427 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9428 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9429 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9430 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9431 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9432 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9433 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9434 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9435 /* complicated encodings */
9436 case 5:
9437 case 13:
9438 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9439 {
9440 if (!pVCpu->iem.s.uRexB)
9441 {
9442 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9443 SET_SS_DEF();
9444 }
9445 else
9446 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9447 }
9448 else
9449 {
9450 uint32_t u32Disp;
9451 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9452 u64EffAddr += (int32_t)u32Disp;
9453 }
9454 break;
9455 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9456 }
9457 break;
9458 }
9459 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9460 }
9461
9462 /* Get and add the displacement. */
9463 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9464 {
9465 case 0:
9466 break;
9467 case 1:
9468 {
9469 int8_t i8Disp;
9470 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9471 u64EffAddr += i8Disp;
9472 break;
9473 }
9474 case 2:
9475 {
9476 uint32_t u32Disp;
9477 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9478 u64EffAddr += (int32_t)u32Disp;
9479 break;
9480 }
9481 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9482 }
9483
9484 }
9485
9486 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9487 {
9488 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9489 return u64EffAddr;
9490 }
9491 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9492 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9493 return u64EffAddr & UINT32_MAX;
9494}
9495#endif /* IEM_WITH_SETJMP */
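
/*
 * Illustrative sketch only (not part of the build): the RIP-relative case handled above
 * (mod=0, rm=5 in 64-bit mode).  The disp32 is relative to the address of the *next*
 * instruction, which is why cbImm (the size of any immediate following the effective
 * address bytes) is added on top of the opcode bytes decoded so far.  The numbers below
 * are made up for illustration.
 */
#if 0
static uint64_t iemExampleRipRelative(uint64_t uRipAtInstrStart, uint8_t cbInstr, int32_t i32Disp)
{
    /* E.g. "mov rax, [rip+disp32]" at 0x1000 with a 7 byte encoding and disp32=0x20
       yields 0x1000 + 7 + 0x20 = 0x1027. */
    return uRipAtInstrStart + cbInstr + (int64_t)i32Disp;
}
#endif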
9496
9497/** @} */
9498
9499
9500#ifdef LOG_ENABLED
9501/**
9502 * Logs the current instruction.
9503 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9504 * @param fSameCtx Set if we have the same context information as the VMM,
9505 * clear if we may have already executed an instruction in
9506 * our debug context. When clear, we assume IEMCPU holds
9507 * valid CPU mode info.
9508 *
9509 * The @a fSameCtx parameter is now misleading and obsolete.
9510 * @param pszFunction The IEM function doing the execution.
9511 */
9512static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9513{
9514# ifdef IN_RING3
9515 if (LogIs2Enabled())
9516 {
9517 char szInstr[256];
9518 uint32_t cbInstr = 0;
9519 if (fSameCtx)
9520 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9521 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9522 szInstr, sizeof(szInstr), &cbInstr);
9523 else
9524 {
9525 uint32_t fFlags = 0;
9526 switch (pVCpu->iem.s.enmCpuMode)
9527 {
9528 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9529 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9530 case IEMMODE_16BIT:
9531 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9532 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9533 else
9534 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9535 break;
9536 }
9537 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9538 szInstr, sizeof(szInstr), &cbInstr);
9539 }
9540
9541 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9542 Log2(("**** %s\n"
9543 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9544 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9545 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9546 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9547 " %s\n"
9548 , pszFunction,
9549 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9550 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9551 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9552 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9553 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9554 szInstr));
9555
9556 if (LogIs3Enabled())
9557 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9558 }
9559 else
9560# endif
9561 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9562 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9563 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9564}
9565#endif /* LOG_ENABLED */
9566
9567
9568#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9569/**
9570 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9571 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9572 *
9573 * @returns Modified rcStrict.
9574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9575 * @param rcStrict The instruction execution status.
9576 */
9577static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9578{
9579 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9580 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9581 {
9582 /* VMX preemption timer takes priority over NMI-window exits. */
9583 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9584 {
9585 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9586 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9587 }
9588 /*
9589 * Check remaining intercepts.
9590 *
9591 * NMI-window and Interrupt-window VM-exits.
9592 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9593 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9594 *
9595 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9596 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9597 */
9598 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9599 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9600 && !TRPMHasTrap(pVCpu))
9601 {
9602 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9603 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9604 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9605 {
9606 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9607 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9608 }
9609 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9610 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9611 {
9612 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9613 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9614 }
9615 }
9616 }
9617 /* TPR-below threshold/APIC write has the highest priority. */
9618 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9619 {
9620 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9621 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9622 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9623 }
9624 /* MTF takes priority over VMX-preemption timer. */
9625 else
9626 {
9627 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9628 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9629 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9630 }
9631 return rcStrict;
9632}
9633#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9634
9635
9636/**
9637 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9638 * IEMExecOneWithPrefetchedByPC.
9639 *
9640 * Similar code is found in IEMExecLots.
9641 *
9642 * @return Strict VBox status code.
9643 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9644 * @param fExecuteInhibit If set, execute the instruction following CLI,
9645 * POP SS and MOV SS,GR.
9646 * @param pszFunction The calling function name.
9647 */
9648DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9649{
9650 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9651 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9652 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9653 RT_NOREF_PV(pszFunction);
9654
9655#ifdef IEM_WITH_SETJMP
9656 VBOXSTRICTRC rcStrict;
9657 jmp_buf JmpBuf;
9658 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9659 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9660 if ((rcStrict = setjmp(JmpBuf)) == 0)
9661 {
9662 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9663 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9664 }
9665 else
9666 pVCpu->iem.s.cLongJumps++;
9667 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9668#else
9669 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9670 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9671#endif
9672 if (rcStrict == VINF_SUCCESS)
9673 pVCpu->iem.s.cInstructions++;
9674 if (pVCpu->iem.s.cActiveMappings > 0)
9675 {
9676 Assert(rcStrict != VINF_SUCCESS);
9677 iemMemRollback(pVCpu);
9678 }
9679 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9680 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9681 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9682
9683//#ifdef DEBUG
9684// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9685//#endif
9686
9687#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9688 /*
9689 * Perform any VMX nested-guest instruction boundary actions.
9690 *
9691 * If any of these causes a VM-exit, we must skip executing the next
9692 * instruction (would run into stale page tables). A VM-exit makes sure
9693 * there is no interrupt-inhibition, so that should ensure we don't try to
9694 * execute the next instruction. Clearing fExecuteInhibit is
9695 * problematic because of the setjmp/longjmp clobbering above.
9696 */
9697 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9698 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9699 || rcStrict != VINF_SUCCESS)
9700 { /* likely */ }
9701 else
9702 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9703#endif
9704
9705 /* Execute the next instruction as well if a cli, pop ss or
9706 mov ss, Gr has just completed successfully. */
9707 if ( fExecuteInhibit
9708 && rcStrict == VINF_SUCCESS
9709 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9710 {
9711 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9712 if (rcStrict == VINF_SUCCESS)
9713 {
9714#ifdef LOG_ENABLED
9715 iemLogCurInstr(pVCpu, false, pszFunction);
9716#endif
9717#ifdef IEM_WITH_SETJMP
9718 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9719 if ((rcStrict = setjmp(JmpBuf)) == 0)
9720 {
9721 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9722 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9723 }
9724 else
9725 pVCpu->iem.s.cLongJumps++;
9726 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9727#else
9728 IEM_OPCODE_GET_NEXT_U8(&b);
9729 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9730#endif
9731 if (rcStrict == VINF_SUCCESS)
9732 {
9733 pVCpu->iem.s.cInstructions++;
9734#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9735 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9736 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9737 { /* likely */ }
9738 else
9739 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9740#endif
9741 }
9742 if (pVCpu->iem.s.cActiveMappings > 0)
9743 {
9744 Assert(rcStrict != VINF_SUCCESS);
9745 iemMemRollback(pVCpu);
9746 }
9747 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9748 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9749 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9750 }
9751 else if (pVCpu->iem.s.cActiveMappings > 0)
9752 iemMemRollback(pVCpu);
9753 /** @todo drop this after we bake this change into RIP advancing. */
9754 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9755 }
9756
9757 /*
9758 * Return value fiddling, statistics and sanity assertions.
9759 */
9760 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9761
9762 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9763 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9764 return rcStrict;
9765}
9766
9767
9768/**
9769 * Execute one instruction.
9770 *
9771 * @return Strict VBox status code.
9772 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9773 */
9774VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9775{
9776 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9777#ifdef LOG_ENABLED
9778 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9779#endif
9780
9781 /*
9782 * Do the decoding and emulation.
9783 */
9784 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9785 if (rcStrict == VINF_SUCCESS)
9786 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9787 else if (pVCpu->iem.s.cActiveMappings > 0)
9788 iemMemRollback(pVCpu);
9789
9790 if (rcStrict != VINF_SUCCESS)
9791 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9792 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9793 return rcStrict;
9794}
9795
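/*
 * Illustrative sketch only (not part of the build): how a caller could drive IEMExecOne to
 * single step a guest, executing instructions until something other than VINF_SUCCESS comes
 * back.  This is not taken from EM; the instruction budget is made up for illustration.
 */
#if 0
static VBOXSTRICTRC iemExampleStepLoop(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (uint32_t i = 0; i < 32 && rcStrict == VINF_SUCCESS; i++)
        rcStrict = IEMExecOne(pVCpu);
    return rcStrict;
}
#endif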
9796
9797VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9798{
9799 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9800 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9801 if (rcStrict == VINF_SUCCESS)
9802 {
9803 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9804 if (pcbWritten)
9805 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9806 }
9807 else if (pVCpu->iem.s.cActiveMappings > 0)
9808 iemMemRollback(pVCpu);
9809
9810 return rcStrict;
9811}
9812
9813
9814VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9815 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9816{
9817 VBOXSTRICTRC rcStrict;
9818 if ( cbOpcodeBytes
9819 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9820 {
9821 iemInitDecoder(pVCpu, false, false);
9822#ifdef IEM_WITH_CODE_TLB
9823 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9824 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9825 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9826 pVCpu->iem.s.offCurInstrStart = 0;
9827 pVCpu->iem.s.offInstrNextByte = 0;
9828#else
9829 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9830 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9831#endif
9832 rcStrict = VINF_SUCCESS;
9833 }
9834 else
9835 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9836 if (rcStrict == VINF_SUCCESS)
9837 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9838 else if (pVCpu->iem.s.cActiveMappings > 0)
9839 iemMemRollback(pVCpu);
9840
9841 return rcStrict;
9842}
9843
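/*
 * Illustrative sketch only (not part of the build): IEMExecOneWithPrefetchedByPC usage.
 * The caller hands over opcode bytes it already has together with the guest PC they were
 * fetched from; as the code above shows, the bytes are only used when they belong to the
 * current RIP, otherwise IEM fetches them itself.  The NOP byte is just an illustration.
 */
#if 0
static VBOXSTRICTRC iemExamplePrefetched(PVMCPUCC pVCpu)
{
    static uint8_t const s_abNop[] = { 0x90 }; /* NOP */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip, s_abNop, sizeof(s_abNop));
}
#endif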
9844
9845VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9846{
9847 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9848 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9849 if (rcStrict == VINF_SUCCESS)
9850 {
9851 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9852 if (pcbWritten)
9853 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9854 }
9855 else if (pVCpu->iem.s.cActiveMappings > 0)
9856 iemMemRollback(pVCpu);
9857
9858 return rcStrict;
9859}
9860
9861
9862VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9863 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9864{
9865 VBOXSTRICTRC rcStrict;
9866 if ( cbOpcodeBytes
9867 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9868 {
9869 iemInitDecoder(pVCpu, true, false);
9870#ifdef IEM_WITH_CODE_TLB
9871 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9872 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9873 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9874 pVCpu->iem.s.offCurInstrStart = 0;
9875 pVCpu->iem.s.offInstrNextByte = 0;
9876#else
9877 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9878 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9879#endif
9880 rcStrict = VINF_SUCCESS;
9881 }
9882 else
9883 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9884 if (rcStrict == VINF_SUCCESS)
9885 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9886 else if (pVCpu->iem.s.cActiveMappings > 0)
9887 iemMemRollback(pVCpu);
9888
9889 return rcStrict;
9890}
9891
9892
9893/**
9894 * For handling split cacheline lock operations when the host has split-lock
9895 * detection enabled.
9896 *
9897 * This will cause the interpreter to disregard the lock prefix and implicit
9898 * locking (xchg).
9899 *
9900 * @returns Strict VBox status code.
9901 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9902 */
9903VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9904{
9905 /*
9906 * Do the decoding and emulation.
9907 */
9908 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
9909 if (rcStrict == VINF_SUCCESS)
9910 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9911 else if (pVCpu->iem.s.cActiveMappings > 0)
9912 iemMemRollback(pVCpu);
9913
9914 if (rcStrict != VINF_SUCCESS)
9915 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9916 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9917 return rcStrict;
9918}
9919
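/*
 * Illustrative sketch only (not part of the build): the intended IEMExecOneIgnoreLock usage.
 * When the host flags a split-lock violation for a guest locked access (the detection path
 * lives outside this file), the instruction can be replayed with the lock prefix and implicit
 * xchg locking disregarded.  The predicate parameter below is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleHandleSplitLock(PVMCPUCC pVCpu, bool fHostDetectedSplitLock)
{
    if (fHostDetectedSplitLock)
        return IEMExecOneIgnoreLock(pVCpu);
    return IEMExecOne(pVCpu);
}
#endif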
9920
9921VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9922{
9923 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9924 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9925
9926 /*
9927 * See if there is an interrupt pending in TRPM, inject it if we can.
9928 */
9929 /** @todo What if we are injecting an exception and not an interrupt? Is that
9930 * possible here? For now we assert it is indeed only an interrupt. */
9931 if (!TRPMHasTrap(pVCpu))
9932 { /* likely */ }
9933 else
9934 {
9935 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9936 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9937 {
9938 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9939#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9940 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9941 if (fIntrEnabled)
9942 {
9943 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9944 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9945 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9946 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9947 else
9948 {
9949 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9950 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9951 }
9952 }
9953#else
9954 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9955#endif
9956 if (fIntrEnabled)
9957 {
9958 uint8_t u8TrapNo;
9959 TRPMEVENT enmType;
9960 uint32_t uErrCode;
9961 RTGCPTR uCr2;
9962 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9963 AssertRC(rc2);
9964 Assert(enmType == TRPM_HARDWARE_INT);
9965 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9966
9967 TRPMResetTrap(pVCpu);
9968
9969#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9970 /* Injecting an event may cause a VM-exit. */
9971 if ( rcStrict != VINF_SUCCESS
9972 && rcStrict != VINF_IEM_RAISED_XCPT)
9973 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9974#else
9975 NOREF(rcStrict);
9976#endif
9977 }
9978 }
9979 }
9980
9981 /*
9982 * Initial decoder init w/ prefetch, then setup setjmp.
9983 */
9984 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9985 if (rcStrict == VINF_SUCCESS)
9986 {
9987#ifdef IEM_WITH_SETJMP
9988 jmp_buf JmpBuf;
9989 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9990 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9991 pVCpu->iem.s.cActiveMappings = 0;
9992 if ((rcStrict = setjmp(JmpBuf)) == 0)
9993#endif
9994 {
9995 /*
9996 * The run loop. We limit ourselves to the caller-specified cMaxInstructions.
9997 */
9998 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9999 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10000 for (;;)
10001 {
10002 /*
10003 * Log the state.
10004 */
10005#ifdef LOG_ENABLED
10006 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10007#endif
10008
10009 /*
10010 * Do the decoding and emulation.
10011 */
10012 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10013 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10014#ifdef VBOX_STRICT
10015 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10016#endif
10017 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10018 {
10019 Assert(pVCpu->iem.s.cActiveMappings == 0);
10020 pVCpu->iem.s.cInstructions++;
10021
10022#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10023 /* Perform any VMX nested-guest instruction boundary actions. */
10024 uint64_t fCpu = pVCpu->fLocalForcedActions;
10025 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10026 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10027 { /* likely */ }
10028 else
10029 {
10030 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10031 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10032 fCpu = pVCpu->fLocalForcedActions;
10033 else
10034 {
10035 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10036 break;
10037 }
10038 }
10039#endif
10040 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10041 {
10042#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10043 uint64_t fCpu = pVCpu->fLocalForcedActions;
10044#endif
10045 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10046 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10047 | VMCPU_FF_TLB_FLUSH
10048 | VMCPU_FF_UNHALT );
10049
10050 if (RT_LIKELY( ( !fCpu
10051 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10052 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10053 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10054 {
10055 if (cMaxInstructionsGccStupidity-- > 0)
10056 {
10057 /* Poll timers every now and then according to the caller's specs. */
10058 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10059 || !TMTimerPollBool(pVM, pVCpu))
10060 {
10061 Assert(pVCpu->iem.s.cActiveMappings == 0);
10062 iemReInitDecoder(pVCpu);
10063 continue;
10064 }
10065 }
10066 }
10067 }
10068 Assert(pVCpu->iem.s.cActiveMappings == 0);
10069 }
10070 else if (pVCpu->iem.s.cActiveMappings > 0)
10071 iemMemRollback(pVCpu);
10072 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10073 break;
10074 }
10075 }
10076#ifdef IEM_WITH_SETJMP
10077 else
10078 {
10079 if (pVCpu->iem.s.cActiveMappings > 0)
10080 iemMemRollback(pVCpu);
10081# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10082 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10083# endif
10084 pVCpu->iem.s.cLongJumps++;
10085 }
10086 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10087#endif
10088
10089 /*
10090 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10091 */
10092 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10093 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10094 }
10095 else
10096 {
10097 if (pVCpu->iem.s.cActiveMappings > 0)
10098 iemMemRollback(pVCpu);
10099
10100#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10101 /*
10102 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10103 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10104 */
10105 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10106#endif
10107 }
10108
10109 /*
10110 * Maybe re-enter raw-mode and log.
10111 */
10112 if (rcStrict != VINF_SUCCESS)
10113 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10114 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10115 if (pcInstructions)
10116 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10117 return rcStrict;
10118}
10119
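/*
 * Illustrative sketch only (not part of the build): a minimal IEMExecLots call.  Per the
 * assertion at the top of the function, cPollRate must be a power of two minus one; it is
 * used as a mask on the remaining-instruction counter, so timers get polled roughly every
 * cPollRate + 1 instructions.  The limits below are made up for illustration.
 */
#if 0
static VBOXSTRICTRC iemExampleExecLots(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    /* Up to 4096 instructions, polling timers about every 512 instructions. */
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    LogFlow(("iemExampleExecLots: executed %u instructions -> %Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif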
10120
10121/**
10122 * Interface used by EMExecuteExec, does exit statistics and limits.
10123 *
10124 * @returns Strict VBox status code.
10125 * @param pVCpu The cross context virtual CPU structure.
10126 * @param fWillExit To be defined.
10127 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10128 * @param cMaxInstructions Maximum number of instructions to execute.
10129 * @param cMaxInstructionsWithoutExits
10130 * The max number of instructions without exits.
10131 * @param pStats Where to return statistics.
10132 */
10133VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10134 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10135{
10136 NOREF(fWillExit); /** @todo define flexible exit crits */
10137
10138 /*
10139 * Initialize return stats.
10140 */
10141 pStats->cInstructions = 0;
10142 pStats->cExits = 0;
10143 pStats->cMaxExitDistance = 0;
10144 pStats->cReserved = 0;
10145
10146 /*
10147 * Initial decoder init w/ prefetch, then setup setjmp.
10148 */
10149 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10150 if (rcStrict == VINF_SUCCESS)
10151 {
10152#ifdef IEM_WITH_SETJMP
10153 jmp_buf JmpBuf;
10154 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
10155 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
10156 pVCpu->iem.s.cActiveMappings = 0;
10157 if ((rcStrict = setjmp(JmpBuf)) == 0)
10158#endif
10159 {
10160#ifdef IN_RING0
10161 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10162#endif
10163 uint32_t cInstructionSinceLastExit = 0;
10164
10165 /*
10166 * The run loop. We limit ourselves to the caller-specified cMaxInstructions.
10167 */
10168 PVM pVM = pVCpu->CTX_SUFF(pVM);
10169 for (;;)
10170 {
10171 /*
10172 * Log the state.
10173 */
10174#ifdef LOG_ENABLED
10175 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10176#endif
10177
10178 /*
10179 * Do the decoding and emulation.
10180 */
10181 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10182
10183 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10184 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10185
10186 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10187 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10188 {
10189 pStats->cExits += 1;
10190 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10191 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10192 cInstructionSinceLastExit = 0;
10193 }
10194
10195 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10196 {
10197 Assert(pVCpu->iem.s.cActiveMappings == 0);
10198 pVCpu->iem.s.cInstructions++;
10199 pStats->cInstructions++;
10200 cInstructionSinceLastExit++;
10201
10202#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10203 /* Perform any VMX nested-guest instruction boundary actions. */
10204 uint64_t fCpu = pVCpu->fLocalForcedActions;
10205 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10206 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10207 { /* likely */ }
10208 else
10209 {
10210 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10211 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10212 fCpu = pVCpu->fLocalForcedActions;
10213 else
10214 {
10215 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10216 break;
10217 }
10218 }
10219#endif
10220 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10221 {
10222#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10223 uint64_t fCpu = pVCpu->fLocalForcedActions;
10224#endif
10225 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10226 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10227 | VMCPU_FF_TLB_FLUSH
10228 | VMCPU_FF_UNHALT );
10229 if (RT_LIKELY( ( ( !fCpu
10230 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10231 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10232 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10233 || pStats->cInstructions < cMinInstructions))
10234 {
10235 if (pStats->cInstructions < cMaxInstructions)
10236 {
10237 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10238 {
10239#ifdef IN_RING0
10240 if ( !fCheckPreemptionPending
10241 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10242#endif
10243 {
10244 Assert(pVCpu->iem.s.cActiveMappings == 0);
10245 iemReInitDecoder(pVCpu);
10246 continue;
10247 }
10248#ifdef IN_RING0
10249 rcStrict = VINF_EM_RAW_INTERRUPT;
10250 break;
10251#endif
10252 }
10253 }
10254 }
10255 Assert(!(fCpu & VMCPU_FF_IEM));
10256 }
10257 Assert(pVCpu->iem.s.cActiveMappings == 0);
10258 }
10259 else if (pVCpu->iem.s.cActiveMappings > 0)
10260 iemMemRollback(pVCpu);
10261 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10262 break;
10263 }
10264 }
10265#ifdef IEM_WITH_SETJMP
10266 else
10267 {
10268 if (pVCpu->iem.s.cActiveMappings > 0)
10269 iemMemRollback(pVCpu);
10270 pVCpu->iem.s.cLongJumps++;
10271 }
10272 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10273#endif
10274
10275 /*
10276 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10277 */
10278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10280 }
10281 else
10282 {
10283 if (pVCpu->iem.s.cActiveMappings > 0)
10284 iemMemRollback(pVCpu);
10285
10286#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10287 /*
10288 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10289 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10290 */
10291 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10292#endif
10293 }
10294
10295 /*
10296 * Maybe re-enter raw-mode and log.
10297 */
10298 if (rcStrict != VINF_SUCCESS)
10299 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10300 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10301 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10302 return rcStrict;
10303}
10304
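/*
 * Illustrative sketch only (not part of the build): a minimal IEMExecForExits call that runs
 * at least 16 instructions, at most 1024, gives up after 256 instructions without a potential
 * exit, and inspects the returned statistics.  All limits here are made up for illustration;
 * fWillExit is not yet acted upon by the implementation (see the NOREF above).
 */
#if 0
static VBOXSTRICTRC iemExampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 16 /*cMinInstructions*/, 1024 /*cMaxInstructions*/,
                                            256 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("iemExampleExecForExits: %Rrc ins=%u exits=%u maxdist=%u\n",
             VBOXSTRICTRC_VAL(rcStrict), Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
    return rcStrict;
}
#endif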
10305
10306/**
10307 * Injects a trap, fault, abort, software interrupt or external interrupt.
10308 *
10309 * The parameter list matches TRPMQueryTrapAll pretty closely.
10310 *
10311 * @returns Strict VBox status code.
10312 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10313 * @param u8TrapNo The trap number.
10314 * @param enmType What type is it (trap/fault/abort), software
10315 * interrupt or hardware interrupt.
10316 * @param uErrCode The error code if applicable.
10317 * @param uCr2 The CR2 value if applicable.
10318 * @param cbInstr The instruction length (only relevant for
10319 * software interrupts).
10320 */
10321VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10322 uint8_t cbInstr)
10323{
10324 iemInitDecoder(pVCpu, false, false);
10325#ifdef DBGFTRACE_ENABLED
10326 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10327 u8TrapNo, enmType, uErrCode, uCr2);
10328#endif
10329
10330 uint32_t fFlags;
10331 switch (enmType)
10332 {
10333 case TRPM_HARDWARE_INT:
10334 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10335 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10336 uErrCode = uCr2 = 0;
10337 break;
10338
10339 case TRPM_SOFTWARE_INT:
10340 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10341 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10342 uErrCode = uCr2 = 0;
10343 break;
10344
10345 case TRPM_TRAP:
10346 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10347 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10348 if (u8TrapNo == X86_XCPT_PF)
10349 fFlags |= IEM_XCPT_FLAGS_CR2;
10350 switch (u8TrapNo)
10351 {
10352 case X86_XCPT_DF:
10353 case X86_XCPT_TS:
10354 case X86_XCPT_NP:
10355 case X86_XCPT_SS:
10356 case X86_XCPT_PF:
10357 case X86_XCPT_AC:
10358 case X86_XCPT_GP:
10359 fFlags |= IEM_XCPT_FLAGS_ERR;
10360 break;
10361 }
10362 break;
10363
10364 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10365 }
10366
10367 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10368
10369 if (pVCpu->iem.s.cActiveMappings > 0)
10370 iemMemRollback(pVCpu);
10371
10372 return rcStrict;
10373}
10374
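/*
 * Illustrative sketch only (not part of the build): injecting an external hardware interrupt
 * via IEMInjectTrap.  As the switch above shows, the error code and CR2 are ignored (zeroed)
 * for TRPM_HARDWARE_INT and cbInstr only matters for software interrupts.  The vector number
 * is made up for illustration.
 */
#if 0
static VBOXSTRICTRC iemExampleInjectExtInt(PVMCPUCC pVCpu)
{
    return IEMInjectTrap(pVCpu, 0x20 /*u8TrapNo*/, TRPM_HARDWARE_INT, 0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif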
10375
10376/**
10377 * Injects the active TRPM event.
10378 *
10379 * @returns Strict VBox status code.
10380 * @param pVCpu The cross context virtual CPU structure.
10381 */
10382VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10383{
10384#ifndef IEM_IMPLEMENTS_TASKSWITCH
10385 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10386#else
10387 uint8_t u8TrapNo;
10388 TRPMEVENT enmType;
10389 uint32_t uErrCode;
10390 RTGCUINTPTR uCr2;
10391 uint8_t cbInstr;
10392 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10393 if (RT_FAILURE(rc))
10394 return rc;
10395
10396 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10397 * ICEBP \#DB injection as a special case. */
10398 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10399#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10400 if (rcStrict == VINF_SVM_VMEXIT)
10401 rcStrict = VINF_SUCCESS;
10402#endif
10403#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10404 if (rcStrict == VINF_VMX_VMEXIT)
10405 rcStrict = VINF_SUCCESS;
10406#endif
10407 /** @todo Are there any other codes that imply the event was successfully
10408 * delivered to the guest? See @bugref{6607}. */
10409 if ( rcStrict == VINF_SUCCESS
10410 || rcStrict == VINF_IEM_RAISED_XCPT)
10411 TRPMResetTrap(pVCpu);
10412
10413 return rcStrict;
10414#endif
10415}
10416
10417
10418VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10419{
10420 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10421 return VERR_NOT_IMPLEMENTED;
10422}
10423
10424
10425VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10426{
10427 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10428 return VERR_NOT_IMPLEMENTED;
10429}
10430
10431
10432/**
10433 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10434 *
10435 * This API ASSUMES that the caller has already verified that the guest code is
10436 * allowed to access the I/O port. (The I/O port is in the DX register in the
10437 * guest state.)
10438 *
10439 * @returns Strict VBox status code.
10440 * @param pVCpu The cross context virtual CPU structure.
10441 * @param cbValue The size of the I/O port access (1, 2, or 4).
10442 * @param enmAddrMode The addressing mode.
10443 * @param fRepPrefix Indicates whether a repeat prefix is used
10444 * (doesn't matter which for this instruction).
10445 * @param cbInstr The instruction length in bytes.
10446 * @param iEffSeg The effective segment address.
10447 * @param fIoChecked Whether the access to the I/O port has been
10448 * checked or not. It's typically checked in the
10449 * HM scenario.
10450 */
10451VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10452 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10453{
10454 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10455 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10456
10457 /*
10458 * State init.
10459 */
10460 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10461
10462 /*
10463 * Switch orgy for getting to the right handler.
10464 */
10465 VBOXSTRICTRC rcStrict;
10466 if (fRepPrefix)
10467 {
10468 switch (enmAddrMode)
10469 {
10470 case IEMMODE_16BIT:
10471 switch (cbValue)
10472 {
10473 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10474 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10475 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10476 default:
10477 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10478 }
10479 break;
10480
10481 case IEMMODE_32BIT:
10482 switch (cbValue)
10483 {
10484 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10485 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10486 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10487 default:
10488 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10489 }
10490 break;
10491
10492 case IEMMODE_64BIT:
10493 switch (cbValue)
10494 {
10495 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10496 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10497 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10498 default:
10499 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10500 }
10501 break;
10502
10503 default:
10504 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10505 }
10506 }
10507 else
10508 {
10509 switch (enmAddrMode)
10510 {
10511 case IEMMODE_16BIT:
10512 switch (cbValue)
10513 {
10514 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10515 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10516 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10517 default:
10518 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10519 }
10520 break;
10521
10522 case IEMMODE_32BIT:
10523 switch (cbValue)
10524 {
10525 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10526 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10527 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10528 default:
10529 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10530 }
10531 break;
10532
10533 case IEMMODE_64BIT:
10534 switch (cbValue)
10535 {
10536 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10537 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10538 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10539 default:
10540 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10541 }
10542 break;
10543
10544 default:
10545 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10546 }
10547 }
10548
10549 if (pVCpu->iem.s.cActiveMappings)
10550 iemMemRollback(pVCpu);
10551
10552 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10553}
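
/*
 * Usage sketch (illustration only, not part of the IEM API surface): a
 * hypothetical HM exit handler that has already decoded a REP OUTSB
 * (source DS:SI, port in DX) and performed the I/O permission checks could
 * forward it like this. hmSketchHandleOutsExit is a made-up name.
 */
#if 0
static VBOXSTRICTRC hmSketchHandleOutsExit(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT,
                                true /*fRepPrefix*/, cbInstr,
                                X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif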
10554
10555
10556/**
10557 * Interface for HM and EM for executing string I/O IN (read) instructions.
10558 *
10559 * This API ASSUMES that the caller has already verified that the guest code is
10560 * allowed to access the I/O port. (The I/O port is in the DX register in the
10561 * guest state.)
10562 *
10563 * @returns Strict VBox status code.
10564 * @param pVCpu The cross context virtual CPU structure.
10565 * @param cbValue The size of the I/O port access (1, 2, or 4).
10566 * @param enmAddrMode The addressing mode.
10567 * @param fRepPrefix Indicates whether a repeat prefix is used
10568 * (doesn't matter which for this instruction).
10569 * @param cbInstr The instruction length in bytes.
10570 * @param fIoChecked Whether the access to the I/O port has been
10571 * checked or not. It's typically checked in the
10572 * HM scenario.
10573 */
10574VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10575 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10576{
10577 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10578
10579 /*
10580 * State init.
10581 */
10582 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10583
10584 /*
10585 * Switch orgy for getting to the right handler.
10586 */
10587 VBOXSTRICTRC rcStrict;
10588 if (fRepPrefix)
10589 {
10590 switch (enmAddrMode)
10591 {
10592 case IEMMODE_16BIT:
10593 switch (cbValue)
10594 {
10595 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10596 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10597 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10598 default:
10599 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10600 }
10601 break;
10602
10603 case IEMMODE_32BIT:
10604 switch (cbValue)
10605 {
10606 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10607 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10608 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10609 default:
10610 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10611 }
10612 break;
10613
10614 case IEMMODE_64BIT:
10615 switch (cbValue)
10616 {
10617 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10618 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10619 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10620 default:
10621 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10622 }
10623 break;
10624
10625 default:
10626 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10627 }
10628 }
10629 else
10630 {
10631 switch (enmAddrMode)
10632 {
10633 case IEMMODE_16BIT:
10634 switch (cbValue)
10635 {
10636 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10637 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10638 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10639 default:
10640 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10641 }
10642 break;
10643
10644 case IEMMODE_32BIT:
10645 switch (cbValue)
10646 {
10647 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10648 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10649 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10650 default:
10651 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10652 }
10653 break;
10654
10655 case IEMMODE_64BIT:
10656 switch (cbValue)
10657 {
10658 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10659 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10660 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10661 default:
10662 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10663 }
10664 break;
10665
10666 default:
10667 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10668 }
10669 }
10670
10671 if ( pVCpu->iem.s.cActiveMappings == 0
10672 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10673 { /* likely */ }
10674 else
10675 {
10676 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10677 iemMemRollback(pVCpu);
10678 }
10679 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10680}
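
/*
 * Usage sketch (illustration only, hypothetical caller): forwarding a plain
 * (non-REP) INSD with 32-bit addressing. Unlike the OUTS case there is no
 * iEffSeg parameter, since INS always stores to ES:(E)DI.
 * hmSketchHandleInsExit is a made-up name.
 */
#if 0
static VBOXSTRICTRC hmSketchHandleInsExit(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoRead(pVCpu, 4 /*cbValue*/, IEMMODE_32BIT,
                               false /*fRepPrefix*/, cbInstr, true /*fIoChecked*/);
}
#endif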
10681
10682
10683/**
10684 * Interface for rawmode to execute an OUT instruction.
10685 *
10686 * @returns Strict VBox status code.
10687 * @param pVCpu The cross context virtual CPU structure.
10688 * @param cbInstr The instruction length in bytes.
10689 * @param u16Port The port to write to.
10690 * @param fImm Whether the port is specified using an immediate operand or
10691 * using the implicit DX register.
10692 * @param cbReg The register size.
10693 *
10694 * @remarks In ring-0 not all of the state needs to be synced in.
10695 */
10696VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10697{
10698 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10699 Assert(cbReg <= 4 && cbReg != 3);
10700
10701 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10702 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10703 Assert(!pVCpu->iem.s.cActiveMappings);
10704 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10705}
10706
10707
10708/**
10709 * Interface for rawmode to execute an IN instruction.
10710 *
10711 * @returns Strict VBox status code.
10712 * @param pVCpu The cross context virtual CPU structure.
10713 * @param cbInstr The instruction length in bytes.
10714 * @param u16Port The port to read from.
10715 * @param fImm Whether the port is specified using an immediate operand or
10716 * using the implicit DX register.
10717 * @param cbReg The register size.
10718 */
10719VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10720{
10721 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10722 Assert(cbReg <= 4 && cbReg != 3);
10723
10724 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10725 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10726 Assert(!pVCpu->iem.s.cActiveMappings);
10727 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10728}
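
/*
 * Usage sketch (illustration only, hypothetical caller): forwarding simple,
 * already decoded port I/O. The first call emulates an 'OUT 0x80, AL' style
 * access (immediate port, one byte), the second an 'IN EAX, DX' style access
 * (four bytes). hmSketchHandlePortIo is a made-up name and u16PortFromDx a
 * hypothetical parameter holding the guest's DX value.
 */
#if 0
static VBOXSTRICTRC hmSketchHandlePortIo(PVMCPUCC pVCpu, uint16_t u16PortFromDx)
{
    /* OUT 0x80, AL - one byte, port encoded as an immediate operand. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 2 /*cbInstr*/, 0x80 /*u16Port*/,
                                              true /*fImm*/, 1 /*cbReg*/);
    if (rcStrict == VINF_SUCCESS)
        /* IN EAX, DX - four bytes, port taken from DX (pass the same value here). */
        rcStrict = IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, u16PortFromDx,
                                    false /*fImm*/, 4 /*cbReg*/);
    return rcStrict;
}
#endif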
10729
10730
10731/**
10732 * Interface for HM and EM to write to a CRx register.
10733 *
10734 * @returns Strict VBox status code.
10735 * @param pVCpu The cross context virtual CPU structure.
10736 * @param cbInstr The instruction length in bytes.
10737 * @param iCrReg The control register number (destination).
10738 * @param iGReg The general purpose register number (source).
10739 *
10740 * @remarks In ring-0 not all of the state needs to be synced in.
10741 */
10742VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10743{
10744 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10745 Assert(iCrReg < 16);
10746 Assert(iGReg < 16);
10747
10748 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10749 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10750 Assert(!pVCpu->iem.s.cActiveMappings);
10751 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10752}
10753
10754
10755/**
10756 * Interface for HM and EM to read from a CRx register.
10757 *
10758 * @returns Strict VBox status code.
10759 * @param pVCpu The cross context virtual CPU structure.
10760 * @param cbInstr The instruction length in bytes.
10761 * @param iGReg The general purpose register number (destination).
10762 * @param iCrReg The control register number (source).
10763 *
10764 * @remarks In ring-0 not all of the state needs to be synced in.
10765 */
10766VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10767{
10768 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10769 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10770 | CPUMCTX_EXTRN_APIC_TPR);
10771 Assert(iCrReg < 16);
10772 Assert(iGReg < 16);
10773
10774 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10775 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10776 Assert(!pVCpu->iem.s.cActiveMappings);
10777 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10778}
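
/*
 * Usage sketch (illustration only, hypothetical caller): forwarding
 * 'MOV CR3, RAX' and 'MOV RCX, CR0' accesses. Register numbers follow the
 * usual encoding (0=*AX, 1=*CX, ...). hmSketchHandleCrAccess is a made-up
 * name.
 */
#if 0
static VBOXSTRICTRC hmSketchHandleCrAccess(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* MOV CR3, RAX - control register 3 is the destination, GPR 0 (RAX) the source. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg*/, 0 /*iGReg*/);
    if (rcStrict == VINF_SUCCESS)
        /* MOV RCX, CR0 - GPR 1 (RCX) is the destination, control register 0 the source. */
        rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, 1 /*iGReg*/, 0 /*iCrReg*/);
    return rcStrict;
}
#endif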
10779
10780
10781/**
10782 * Interface for HM and EM to write to a DRx register.
10783 *
10784 * @returns Strict VBox status code.
10785 * @param pVCpu The cross context virtual CPU structure.
10786 * @param cbInstr The instruction length in bytes.
10787 * @param iDrReg The debug register number (destination).
10788 * @param iGReg The general purpose register number (source).
10789 *
10790 * @remarks In ring-0 not all of the state needs to be synced in.
10791 */
10792VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10793{
10794 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10795 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
10796 Assert(iDrReg < 8);
10797 Assert(iGReg < 16);
10798
10799 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10800 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10801 Assert(!pVCpu->iem.s.cActiveMappings);
10802 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10803}
10804
10805
10806/**
10807 * Interface for HM and EM to read from a DRx register.
10808 *
10809 * @returns Strict VBox status code.
10810 * @param pVCpu The cross context virtual CPU structure.
10811 * @param cbInstr The instruction length in bytes.
10812 * @param iGReg The general purpose register number (destination).
10813 * @param iDrReg The debug register number (source).
10814 *
10815 * @remarks In ring-0 not all of the state needs to be synced in.
10816 */
10817VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10818{
10819 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10820 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
10821 Assert(iDrReg < 8);
10822 Assert(iGReg < 16);
10823
10824 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10825 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10826 Assert(!pVCpu->iem.s.cActiveMappings);
10827 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10828}
10829
10830
10831/**
10832 * Interface for HM and EM to clear the CR0[TS] bit.
10833 *
10834 * @returns Strict VBox status code.
10835 * @param pVCpu The cross context virtual CPU structure.
10836 * @param cbInstr The instruction length in bytes.
10837 *
10838 * @remarks In ring-0 not all of the state needs to be synced in.
10839 */
10840VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10841{
10842 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10843
10844 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10845 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10846 Assert(!pVCpu->iem.s.cActiveMappings);
10847 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10848}
10849
10850
10851/**
10852 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10853 *
10854 * @returns Strict VBox status code.
10855 * @param pVCpu The cross context virtual CPU structure.
10856 * @param cbInstr The instruction length in bytes.
10857 * @param uValue The value to load into CR0.
10858 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10859 * memory operand. Otherwise pass NIL_RTGCPTR.
10860 *
10861 * @remarks In ring-0 not all of the state needs to be synced in.
10862 */
10863VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10864{
10865 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10866
10867 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10868 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10869 Assert(!pVCpu->iem.s.cActiveMappings);
10870 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10871}
10872
10873
10874/**
10875 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10876 *
10877 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10878 *
10879 * @returns Strict VBox status code.
10880 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10881 * @param cbInstr The instruction length in bytes.
10882 * @remarks In ring-0 not all of the state needs to be synced in.
10883 * @thread EMT(pVCpu)
10884 */
10885VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10886{
10887 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10888
10889 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10890 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10891 Assert(!pVCpu->iem.s.cActiveMappings);
10892 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10893}
10894
10895
10896/**
10897 * Interface for HM and EM to emulate the WBINVD instruction.
10898 *
10899 * @returns Strict VBox status code.
10900 * @param pVCpu The cross context virtual CPU structure.
10901 * @param cbInstr The instruction length in bytes.
10902 *
10903 * @remarks In ring-0 not all of the state needs to be synced in.
10904 */
10905VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10906{
10907 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10908
10909 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10910 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10911 Assert(!pVCpu->iem.s.cActiveMappings);
10912 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10913}
10914
10915
10916/**
10917 * Interface for HM and EM to emulate the INVD instruction.
10918 *
10919 * @returns Strict VBox status code.
10920 * @param pVCpu The cross context virtual CPU structure.
10921 * @param cbInstr The instruction length in bytes.
10922 *
10923 * @remarks In ring-0 not all of the state needs to be synced in.
10924 */
10925VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10926{
10927 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10928
10929 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10930 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10931 Assert(!pVCpu->iem.s.cActiveMappings);
10932 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10933}
10934
10935
10936/**
10937 * Interface for HM and EM to emulate the INVLPG instruction.
10938 *
10939 * @returns Strict VBox status code.
10940 * @retval VINF_PGM_SYNC_CR3
10941 *
10942 * @param pVCpu The cross context virtual CPU structure.
10943 * @param cbInstr The instruction length in bytes.
10944 * @param GCPtrPage The effective address of the page to invalidate.
10945 *
10946 * @remarks In ring-0 not all of the state needs to be synced in.
10947 */
10948VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10949{
10950 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10951
10952 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10953 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10954 Assert(!pVCpu->iem.s.cActiveMappings);
10955 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10956}
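
/*
 * Usage sketch (illustration only, hypothetical caller): an INVLPG exit
 * handler can forward the page address like this and treat the documented
 * VINF_PGM_SYNC_CR3 status as success, leaving the actual shadow page table
 * resync to its normal force-flag processing. hmSketchHandleInvlpgExit is a
 * made-up name.
 */
#if 0
static VBOXSTRICTRC hmSketchHandleInvlpgExit(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
    if (rcStrict == VINF_PGM_SYNC_CR3)
        rcStrict = VINF_SUCCESS; /* The pending sync is dealt with elsewhere by the caller. */
    return rcStrict;
}
#endif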
10957
10958
10959/**
10960 * Interface for HM and EM to emulate the INVPCID instruction.
10961 *
10962 * @returns Strict VBox status code.
10963 * @retval VINF_PGM_SYNC_CR3
10964 *
10965 * @param pVCpu The cross context virtual CPU structure.
10966 * @param cbInstr The instruction length in bytes.
10967 * @param iEffSeg The effective segment register.
10968 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10969 * @param uType The invalidation type.
10970 *
10971 * @remarks In ring-0 not all of the state needs to be synced in.
10972 */
10973VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10974 uint64_t uType)
10975{
10976 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10977
10978 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10979 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10980 Assert(!pVCpu->iem.s.cActiveMappings);
10981 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10982}
10983
10984
10985/**
10986 * Interface for HM and EM to emulate the CPUID instruction.
10987 *
10988 * @returns Strict VBox status code.
10989 *
10990 * @param pVCpu The cross context virtual CPU structure.
10991 * @param cbInstr The instruction length in bytes.
10992 *
10993 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
10994 */
10995VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10996{
10997 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10998 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10999
11000 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11001 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11002 Assert(!pVCpu->iem.s.cActiveMappings);
11003 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11004}
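
/*
 * Usage sketch (illustration only, hypothetical caller): CPUID exits need no
 * decoding beyond the instruction length, so forwarding is a single call once
 * the required register state (the usual set plus RAX and RCX) has been
 * imported. hmSketchHandleCpuidExit is a made-up name.
 */
#if 0
static VBOXSTRICTRC hmSketchHandleCpuidExit(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedCpuid(pVCpu, cbInstr);
    Log(("CPUID exit -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif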
11005
11006
11007/**
11008 * Interface for HM and EM to emulate the RDPMC instruction.
11009 *
11010 * @returns Strict VBox status code.
11011 *
11012 * @param pVCpu The cross context virtual CPU structure.
11013 * @param cbInstr The instruction length in bytes.
11014 *
11015 * @remarks Not all of the state needs to be synced in.
11016 */
11017VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11018{
11019 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11020 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11021
11022 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11023 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11024 Assert(!pVCpu->iem.s.cActiveMappings);
11025 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11026}
11027
11028
11029/**
11030 * Interface for HM and EM to emulate the RDTSC instruction.
11031 *
11032 * @returns Strict VBox status code.
11033 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11034 *
11035 * @param pVCpu The cross context virtual CPU structure.
11036 * @param cbInstr The instruction length in bytes.
11037 *
11038 * @remarks Not all of the state needs to be synced in.
11039 */
11040VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11041{
11042 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11043 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11044
11045 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11046 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11047 Assert(!pVCpu->iem.s.cActiveMappings);
11048 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11049}
11050
11051
11052/**
11053 * Interface for HM and EM to emulate the RDTSCP instruction.
11054 *
11055 * @returns Strict VBox status code.
11056 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11057 *
11058 * @param pVCpu The cross context virtual CPU structure.
11059 * @param cbInstr The instruction length in bytes.
11060 *
11061 * @remarks Not all of the state needs to be synced in. Including
11062 * CPUMCTX_EXTRN_TSC_AUX is recommended to avoid an extra fetch call.
11063 */
11064VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11065{
11066 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11067 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11068
11069 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11070 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11071 Assert(!pVCpu->iem.s.cActiveMappings);
11072 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11073}
11074
11075
11076/**
11077 * Interface for HM and EM to emulate the RDMSR instruction.
11078 *
11079 * @returns Strict VBox status code.
11080 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11081 *
11082 * @param pVCpu The cross context virtual CPU structure.
11083 * @param cbInstr The instruction length in bytes.
11084 *
11085 * @remarks Not all of the state needs to be synced in. Requires RCX and
11086 * (currently) all MSRs.
11087 */
11088VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11089{
11090 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11091 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11092
11093 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11094 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11095 Assert(!pVCpu->iem.s.cActiveMappings);
11096 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11097}
11098
11099
11100/**
11101 * Interface for HM and EM to emulate the WRMSR instruction.
11102 *
11103 * @returns Strict VBox status code.
11104 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11105 *
11106 * @param pVCpu The cross context virtual CPU structure.
11107 * @param cbInstr The instruction length in bytes.
11108 *
11109 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11110 * and (currently) all MSRs.
11111 */
11112VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11113{
11114 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11115 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11116 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11117
11118 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11119 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11120 Assert(!pVCpu->iem.s.cActiveMappings);
11121 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11122}
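
/*
 * Usage sketch (illustration only, hypothetical caller): MSR exits are
 * forwarded the same way; the MSR index and (for WRMSR) the value are taken
 * from the guest RCX and RDX:RAX state that the caller imported beforehand.
 * hmSketchHandleMsrExit is a made-up name and fWrite a hypothetical
 * parameter.
 */
#if 0
static VBOXSTRICTRC hmSketchHandleMsrExit(PVMCPUCC pVCpu, uint8_t cbInstr, bool fWrite)
{
    return fWrite
         ? IEMExecDecodedWrmsr(pVCpu, cbInstr)
         : IEMExecDecodedRdmsr(pVCpu, cbInstr);
}
#endif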
11123
11124
11125/**
11126 * Interface for HM and EM to emulate the MONITOR instruction.
11127 *
11128 * @returns Strict VBox status code.
11129 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11130 *
11131 * @param pVCpu The cross context virtual CPU structure.
11132 * @param cbInstr The instruction length in bytes.
11133 *
11134 * @remarks Not all of the state needs to be synced in.
11135 * @remarks ASSUMES the default DS segment and that no segment override
11136 * prefixes are used.
11137 */
11138VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11139{
11140 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11141 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11142
11143 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11144 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11145 Assert(!pVCpu->iem.s.cActiveMappings);
11146 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11147}
11148
11149
11150/**
11151 * Interface for HM and EM to emulate the MWAIT instruction.
11152 *
11153 * @returns Strict VBox status code.
11154 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11155 *
11156 * @param pVCpu The cross context virtual CPU structure.
11157 * @param cbInstr The instruction length in bytes.
11158 *
11159 * @remarks Not all of the state needs to be synced in.
11160 */
11161VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11162{
11163 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11164 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11165
11166 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11167 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11168 Assert(!pVCpu->iem.s.cActiveMappings);
11169 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11170}
11171
11172
11173/**
11174 * Interface for HM and EM to emulate the HLT instruction.
11175 *
11176 * @returns Strict VBox status code.
11177 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11178 *
11179 * @param pVCpu The cross context virtual CPU structure.
11180 * @param cbInstr The instruction length in bytes.
11181 *
11182 * @remarks Not all of the state needs to be synced in.
11183 */
11184VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11185{
11186 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11187
11188 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11189 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11190 Assert(!pVCpu->iem.s.cActiveMappings);
11191 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11192}
11193
11194
11195/**
11196 * Checks if IEM is in the process of delivering an event (interrupt or
11197 * exception).
11198 *
11199 * @returns true if we're in the process of raising an interrupt or exception,
11200 * false otherwise.
11201 * @param pVCpu The cross context virtual CPU structure.
11202 * @param puVector Where to store the vector associated with the
11203 * currently delivered event, optional.
11204 * @param pfFlags Where to store the event delivery flags (see
11205 * IEM_XCPT_FLAGS_XXX), optional.
11206 * @param puErr Where to store the error code associated with the
11207 * event, optional.
11208 * @param puCr2 Where to store the CR2 associated with the event,
11209 * optional.
11210 * @remarks The caller should check the flags to determine if the error code and
11211 * CR2 are valid for the event.
11212 */
11213VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11214{
11215 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11216 if (fRaisingXcpt)
11217 {
11218 if (puVector)
11219 *puVector = pVCpu->iem.s.uCurXcpt;
11220 if (pfFlags)
11221 *pfFlags = pVCpu->iem.s.fCurXcpt;
11222 if (puErr)
11223 *puErr = pVCpu->iem.s.uCurXcptErr;
11224 if (puCr2)
11225 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11226 }
11227 return fRaisingXcpt;
11228}
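
/*
 * Usage sketch (illustration only, hypothetical caller): a caller that needs
 * to know whether IEM is in the middle of delivering an event (e.g. to build
 * event injection info) could query it like this; unwanted outputs may be
 * passed as NULL. hmSketchCheckPendingXcpt is a made-up name.
 */
#if 0
static void hmSketchCheckPendingXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("IEM is delivering vector %#x (flags=%#x err=%#x cr2=%#RX64)\n",
             uVector, fFlags, uErr, uCr2));
}
#endif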
11229
11230#ifdef IN_RING3
11231
11232/**
11233 * Handles the unlikely and probably fatal merge cases.
11234 *
11235 * @returns Merged status code.
11236 * @param rcStrict Current EM status code.
11237 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11238 * with @a rcStrict.
11239 * @param iMemMap The memory mapping index. For error reporting only.
11240 * @param pVCpu The cross context virtual CPU structure of the calling
11241 * thread, for error reporting only.
11242 */
11243DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11244 unsigned iMemMap, PVMCPUCC pVCpu)
11245{
11246 if (RT_FAILURE_NP(rcStrict))
11247 return rcStrict;
11248
11249 if (RT_FAILURE_NP(rcStrictCommit))
11250 return rcStrictCommit;
11251
11252 if (rcStrict == rcStrictCommit)
11253 return rcStrictCommit;
11254
11255 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11256 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11257 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11258 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11259 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11260 return VERR_IOM_FF_STATUS_IPE;
11261}
11262
11263
11264/**
11265 * Helper for IEMR3ProcessForceFlag.
11266 *
11267 * @returns Merged status code.
11268 * @param rcStrict Current EM status code.
11269 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11270 * with @a rcStrict.
11271 * @param iMemMap The memory mapping index. For error reporting only.
11272 * @param pVCpu The cross context virtual CPU structure of the calling
11273 * thread, for error reporting only.
11274 */
11275DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11276{
11277 /* Simple. */
11278 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11279 return rcStrictCommit;
11280
11281 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11282 return rcStrict;
11283
11284 /* EM scheduling status codes. */
11285 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11286 && rcStrict <= VINF_EM_LAST))
11287 {
11288 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11289 && rcStrictCommit <= VINF_EM_LAST))
11290 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11291 }
11292
11293 /* Unlikely */
11294 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11295}
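
/*
 * Worked examples (illustration only) of the merge rules above; the sketch
 * function and its asserts are hypothetical and not part of the build.
 */
#if 0
static void iemSketchMergeStatusExamples(PVMCPUCC pVCpu)
{
    /* VINF_EM_RAW_TO_R3 is moot once we are back in ring-3, so the commit status wins. */
    Assert(iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS, 0 /*iMemMap*/, pVCpu) == VINF_SUCCESS);
    /* A successful commit never overrides an informational EM status from execution. */
    Assert(iemR3MergeStatus(VINF_EM_HALT, VINF_SUCCESS, 0 /*iMemMap*/, pVCpu) == VINF_EM_HALT);
    /* When both are EM scheduling statuses, the numerically lower (higher priority) one is kept. */
}
#endif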
11296
11297
11298/**
11299 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11300 *
11301 * @returns Merge between @a rcStrict and what the commit operation returned.
11302 * @param pVM The cross context VM structure.
11303 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11304 * @param rcStrict The status code returned by ring-0 or raw-mode.
11305 */
11306VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11307{
11308 /*
11309 * Reset the pending commit.
11310 */
11311 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11312 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11313 ("%#x %#x %#x\n",
11314 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11315 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11316
11317 /*
11318 * Commit the pending bounce buffers (usually just one).
11319 */
11320 unsigned cBufs = 0;
11321 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11322 while (iMemMap-- > 0)
11323 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11324 {
11325 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11326 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11327 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11328
11329 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11330 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11331 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11332
11333 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11334 {
11335 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11336 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11337 pbBuf,
11338 cbFirst,
11339 PGMACCESSORIGIN_IEM);
11340 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11341 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11342 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11343 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11344 }
11345
11346 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11347 {
11348 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11349 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11350 pbBuf + cbFirst,
11351 cbSecond,
11352 PGMACCESSORIGIN_IEM);
11353 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11354 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11355 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11356 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11357 }
11358 cBufs++;
11359 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11360 }
11361
11362 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11363 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11364 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11365 pVCpu->iem.s.cActiveMappings = 0;
11366 return rcStrict;
11367}
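
/*
 * Usage sketch (illustration only): the force-flag handling code in EM is the
 * real caller; conceptually it does something like the following with the
 * status it got back from ring-0 execution. emSketchHandleIemForceFlag is a
 * made-up name.
 */
#if 0
static VBOXSTRICTRC emSketchHandleIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif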
11368
11369#endif /* IN_RING3 */
11370