VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@97471

Last change on this file since 97471 was 97471, checked in by vboxsync, 2 years ago

VMM/IEM: Build fix for VBOX_WITH_IEM_TLB on windows. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 462.5 KB
1/* $Id: IEMAll.cpp 97471 2022-11-09 00:29:45Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 */
91
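/*
 * Illustrative only (not compiled): roughly what statements at the different
 * logging levels above look like in practice.  The messages are made up for
 * the sake of the example; only the macros and the state members are real.
 */
#if 0
    Log(("Raising #GP(0)\n"));                                                           /* level 1: exceptions & major events */
    LogFlow(("IEMExecOne: enter\n"));                                                    /* flow: basic enter/exit info */
    Log4(("decode %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));  /* level 4: mnemonics w/ EIP */
    Log10(("IEMTlbInvalidateAll\n"));                                                    /* level 10: TLB activity */
#endif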
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <VBox/disopcode.h>
130#include <iprt/asm-math.h>
131#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
132# include <iprt/asm-amd64-x86.h>
133#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
134# include <iprt/asm-arm.h>
135#endif
136#include <iprt/assert.h>
137#include <iprt/string.h>
138#include <iprt/x86.h>
139
140#include "IEMInline.h"
141
142
143/*********************************************************************************************************************************
144* Structures and Typedefs *
145*********************************************************************************************************************************/
146/**
147 * CPU exception classes.
148 */
149typedef enum IEMXCPTCLASS
150{
151 IEMXCPTCLASS_BENIGN,
152 IEMXCPTCLASS_CONTRIBUTORY,
153 IEMXCPTCLASS_PAGE_FAULT,
154 IEMXCPTCLASS_DOUBLE_FAULT
155} IEMXCPTCLASS;
156
157
158/*********************************************************************************************************************************
159* Global Variables *
160*********************************************************************************************************************************/
161#if defined(IEM_LOG_MEMORY_WRITES)
162/** What IEM just wrote. */
163uint8_t g_abIemWrote[256];
164/** How much IEM just wrote. */
165size_t g_cbIemWrote;
166#endif
167
168
169/*********************************************************************************************************************************
170* Internal Functions *
171*********************************************************************************************************************************/
172static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
173 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
174
175
176/**
177 * Initializes the decoder state.
178 *
179 * iemReInitDecoder is mostly a copy of this function.
180 *
181 * @param pVCpu The cross context virtual CPU structure of the
182 * calling thread.
183 * @param fBypassHandlers Whether to bypass access handlers.
184 * @param fDisregardLock Whether to disregard the LOCK prefix.
185 */
186DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
187{
188 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
189 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
191 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
192 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
193 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
194 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
196 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
197 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
198
199 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
200 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
201 pVCpu->iem.s.enmCpuMode = enmMode;
202 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
203 pVCpu->iem.s.enmEffAddrMode = enmMode;
204 if (enmMode != IEMMODE_64BIT)
205 {
206 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
207 pVCpu->iem.s.enmEffOpSize = enmMode;
208 }
209 else
210 {
211 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
212 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
213 }
214 pVCpu->iem.s.fPrefixes = 0;
215 pVCpu->iem.s.uRexReg = 0;
216 pVCpu->iem.s.uRexB = 0;
217 pVCpu->iem.s.uRexIndex = 0;
218 pVCpu->iem.s.idxPrefix = 0;
219 pVCpu->iem.s.uVex3rdReg = 0;
220 pVCpu->iem.s.uVexLength = 0;
221 pVCpu->iem.s.fEvexStuff = 0;
222 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
223#ifdef IEM_WITH_CODE_TLB
224 pVCpu->iem.s.pbInstrBuf = NULL;
225 pVCpu->iem.s.offInstrNextByte = 0;
226 pVCpu->iem.s.offCurInstrStart = 0;
227# ifdef VBOX_STRICT
228 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
229 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
230 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
231# endif
232#else
233 pVCpu->iem.s.offOpcode = 0;
234 pVCpu->iem.s.cbOpcode = 0;
235#endif
236 pVCpu->iem.s.offModRm = 0;
237 pVCpu->iem.s.cActiveMappings = 0;
238 pVCpu->iem.s.iNextMapping = 0;
239 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
240 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
241 pVCpu->iem.s.fDisregardLock = fDisregardLock;
242
243#ifdef DBGFTRACE_ENABLED
244 switch (enmMode)
245 {
246 case IEMMODE_64BIT:
247 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
248 break;
249 case IEMMODE_32BIT:
250 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
251 break;
252 case IEMMODE_16BIT:
253 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
254 break;
255 }
256#endif
257}
258
259
260/**
261 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
262 *
263 * This is mostly a copy of iemInitDecoder.
264 *
265 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
266 */
267DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
268{
269 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
270 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
271 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
278
279 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
280 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
281 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
282 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
283 pVCpu->iem.s.enmEffAddrMode = enmMode;
284 if (enmMode != IEMMODE_64BIT)
285 {
286 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffOpSize = enmMode;
288 }
289 else
290 {
291 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
292 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
293 }
294 pVCpu->iem.s.fPrefixes = 0;
295 pVCpu->iem.s.uRexReg = 0;
296 pVCpu->iem.s.uRexB = 0;
297 pVCpu->iem.s.uRexIndex = 0;
298 pVCpu->iem.s.idxPrefix = 0;
299 pVCpu->iem.s.uVex3rdReg = 0;
300 pVCpu->iem.s.uVexLength = 0;
301 pVCpu->iem.s.fEvexStuff = 0;
302 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
303#ifdef IEM_WITH_CODE_TLB
304 if (pVCpu->iem.s.pbInstrBuf)
305 {
306 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
307 - pVCpu->iem.s.uInstrBufPc;
308 if (off < pVCpu->iem.s.cbInstrBufTotal)
309 {
310 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
311 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
312 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
313 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
314 else
315 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
316 }
317 else
318 {
319 pVCpu->iem.s.pbInstrBuf = NULL;
320 pVCpu->iem.s.offInstrNextByte = 0;
321 pVCpu->iem.s.offCurInstrStart = 0;
322 pVCpu->iem.s.cbInstrBuf = 0;
323 pVCpu->iem.s.cbInstrBufTotal = 0;
324 }
325 }
326 else
327 {
328 pVCpu->iem.s.offInstrNextByte = 0;
329 pVCpu->iem.s.offCurInstrStart = 0;
330 pVCpu->iem.s.cbInstrBuf = 0;
331 pVCpu->iem.s.cbInstrBufTotal = 0;
332 }
333#else
334 pVCpu->iem.s.cbOpcode = 0;
335 pVCpu->iem.s.offOpcode = 0;
336#endif
337 pVCpu->iem.s.offModRm = 0;
338 Assert(pVCpu->iem.s.cActiveMappings == 0);
339 pVCpu->iem.s.iNextMapping = 0;
340 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
341 Assert(pVCpu->iem.s.fBypassHandlers == false);
342
343#ifdef DBGFTRACE_ENABLED
344 switch (enmMode)
345 {
346 case IEMMODE_64BIT:
347 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
348 break;
349 case IEMMODE_32BIT:
350 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
351 break;
352 case IEMMODE_16BIT:
353 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
354 break;
355 }
356#endif
357}
358
359
360
361/**
362 * Prefetches opcodes the first time, when starting to execute.
363 *
364 * @returns Strict VBox status code.
365 * @param pVCpu The cross context virtual CPU structure of the
366 * calling thread.
367 * @param fBypassHandlers Whether to bypass access handlers.
368 * @param fDisregardLock Whether to disregard LOCK prefixes.
369 *
370 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
371 * store them as such.
372 */
373static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
374{
375 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
376
377#ifdef IEM_WITH_CODE_TLB
378 /** @todo Do ITLB lookup here. */
379
380#else /* !IEM_WITH_CODE_TLB */
381
382 /*
383 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
384 *
385 * First translate CS:rIP to a physical address.
386 */
387 uint32_t cbToTryRead;
388 RTGCPTR GCPtrPC;
389 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
390 {
391 cbToTryRead = GUEST_PAGE_SIZE;
392 GCPtrPC = pVCpu->cpum.GstCtx.rip;
393 if (IEM_IS_CANONICAL(GCPtrPC))
394 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
395 else
396 return iemRaiseGeneralProtectionFault0(pVCpu);
397 }
398 else
399 {
400 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
401 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
402 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
403 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
404 else
405 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
406 if (cbToTryRead) { /* likely */ }
407 else /* overflowed */
408 {
409 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
410 cbToTryRead = UINT32_MAX;
411 }
412 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
413 Assert(GCPtrPC <= UINT32_MAX);
414 }
415
416 PGMPTWALK Walk;
417 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
418 if (RT_SUCCESS(rc))
419 Assert(Walk.fSucceeded); /* probable. */
420 else
421 {
422 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
423#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
424 if (Walk.fFailed & PGM_WALKFAIL_EPT)
425 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
426#endif
427 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
428 }
429 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
430 else
431 {
432 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
433#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
434 if (Walk.fFailed & PGM_WALKFAIL_EPT)
435 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
436#endif
437 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
438 }
439 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
440 else
441 {
442 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
443#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
444 if (Walk.fFailed & PGM_WALKFAIL_EPT)
445 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
446#endif
447 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
448 }
449 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
450 /** @todo Check reserved bits and such stuff. PGM is better at doing
451 * that, so do it when implementing the guest virtual address
452 * TLB... */
453
454 /*
455 * Read the bytes at this address.
456 */
457 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
458 if (cbToTryRead > cbLeftOnPage)
459 cbToTryRead = cbLeftOnPage;
460 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
461 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
462
463 if (!pVCpu->iem.s.fBypassHandlers)
464 {
465 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
466 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
467 { /* likely */ }
468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
469 {
470 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
471 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
472 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
473 }
474 else
475 {
476 Log((RT_SUCCESS(rcStrict)
477 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
478 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
479 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
480 return rcStrict;
481 }
482 }
483 else
484 {
485 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
486 if (RT_SUCCESS(rc))
487 { /* likely */ }
488 else
489 {
490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
491 GCPtrPC, GCPhys, cbToTryRead, rc));
492 return rc;
493 }
494 }
495 pVCpu->iem.s.cbOpcode = cbToTryRead;
496#endif /* !IEM_WITH_CODE_TLB */
497 return VINF_SUCCESS;
498}
499
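/*
 * Illustrative recap (not compiled) of the clamping done above for the
 * non-64-bit case: the linear address is CS.base + EIP, and the prefetch
 * length is bounded by the CS limit, the guest page boundary and the opcode
 * buffer.  The limit/canonical fault checks of the real code are omitted here.
 */
#if 0
    RTGCPTR const GCPtrPC     = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.eip;
    uint32_t      cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - pVCpu->cpum.GstCtx.eip + 1;     /* bytes left in CS */
    cbToTryRead = RT_MIN(cbToTryRead, GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK));     /* don't cross the page */
    cbToTryRead = RT_MIN(cbToTryRead, (uint32_t)sizeof(pVCpu->iem.s.abOpcode));                  /* fit the opcode buffer */
#endif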
500
501/**
502 * Invalidates the IEM TLBs.
503 *
504 * This is called internally as well as by PGM when moving GC mappings.
505 *
507 * @param pVCpu The cross context virtual CPU structure of the calling
508 * thread.
509 */
510VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
511{
512#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
513 Log10(("IEMTlbInvalidateAll\n"));
514# ifdef IEM_WITH_CODE_TLB
515 pVCpu->iem.s.cbInstrBufTotal = 0;
516 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
517 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
518 { /* very likely */ }
519 else
520 {
521 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
522 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
523 while (i-- > 0)
524 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
525 }
526# endif
527
528# ifdef IEM_WITH_DATA_TLB
529 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
530 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
531 { /* very likely */ }
532 else
533 {
534 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
535 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
536 while (i-- > 0)
537 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
538 }
539# endif
540#else
541 RT_NOREF(pVCpu);
542#endif
543}
544
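/*
 * Note on the revision trick used above (illustrative sketch, not compiled):
 * rather than scrubbing every entry, bumping uTlbRevision makes all cached
 * tags stale, because a lookup only hits when the entry's uTag embeds the
 * current revision.  Entries are only cleared for real when the revision
 * counter wraps around to zero.  A hypothetical data TLB lookup, mirroring the
 * code TLB lookup in iemOpcodeFetchBytesJmp, with GCPtrMem standing in for the
 * linear address:
 */
#if 0
    uint64_t const     uTag  = IEMTLB_CALC_TAG(&pVCpu->iem.s.DataTlb, GCPtrMem);
    PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
    bool const         fHit  = pTlbe->uTag == uTag; /* stale entries miss because uTag embeds the old revision */
#endif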
545
546/**
547 * Invalidates a page in the TLBs.
548 *
549 * @param pVCpu The cross context virtual CPU structure of the calling
550 * thread.
551 * @param GCPtr The address of the page to invalidate
552 * @thread EMT(pVCpu)
553 */
554VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
555{
556#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
557 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
558 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
559 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
560 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
561
562# ifdef IEM_WITH_CODE_TLB
563 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
564 {
565 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
566 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
567 pVCpu->iem.s.cbInstrBufTotal = 0;
568 }
569# endif
570
571# ifdef IEM_WITH_DATA_TLB
572 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
573 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
574# endif
575#else
576 NOREF(pVCpu); NOREF(GCPtr);
577#endif
578}
579
580
581#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
582/**
583 * Invalidates both TLBs the slow way following a rollover.
584 *
585 * Worker for IEMTlbInvalidateAllPhysical,
586 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
587 * iemMemMapJmp and others.
588 *
589 * @thread EMT(pVCpu)
590 */
591static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
592{
593 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
594 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
595 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
596
597 unsigned i;
598# ifdef IEM_WITH_CODE_TLB
599 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
600 while (i-- > 0)
601 {
602 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
603 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
604 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
605 }
606# endif
607# ifdef IEM_WITH_DATA_TLB
608 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
609 while (i-- > 0)
610 {
611 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
612 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
613 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
614 }
615# endif
616
617}
618#endif
619
620
621/**
622 * Invalidates the host physical aspects of the IEM TLBs.
623 *
624 * This is called internally as well as by PGM when moving GC mappings.
625 *
626 * @param pVCpu The cross context virtual CPU structure of the calling
627 * thread.
628 * @note Currently not used.
629 */
630VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
631{
632#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
633 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
634 Log10(("IEMTlbInvalidateAllPhysical\n"));
635
636# ifdef IEM_WITH_CODE_TLB
637 pVCpu->iem.s.cbInstrBufTotal = 0;
638# endif
639 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
640 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
641 {
642 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
643 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
644 }
645 else
646 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
647#else
648 NOREF(pVCpu);
649#endif
650}
651
652
653/**
654 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
655 *
656 * This is called internally as well as by PGM when moving GC mappings.
657 *
658 * @param pVM The cross context VM structure.
659 * @param idCpuCaller The ID of the calling EMT if available to the caller,
660 * otherwise NIL_VMCPUID.
661 *
662 * @remarks Caller holds the PGM lock.
663 */
664VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
665{
666#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
667 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
668 if (pVCpuCaller)
669 VMCPU_ASSERT_EMT(pVCpuCaller);
670 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
671
672 VMCC_FOR_EACH_VMCPU(pVM)
673 {
674# ifdef IEM_WITH_CODE_TLB
675 if (pVCpuCaller == pVCpu)
676 pVCpu->iem.s.cbInstrBufTotal = 0;
677# endif
678
679 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
680 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
681 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
682 { /* likely */}
683 else if (pVCpuCaller == pVCpu)
684 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
685 else
686 {
687 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
688 continue;
689 }
690 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
691 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
692 }
693 VMCC_FOR_EACH_VMCPU_END(pVM);
694
695#else
696 RT_NOREF(pVM, idCpuCaller);
697#endif
698}
699
700#ifdef IEM_WITH_CODE_TLB
701
702/**
703 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
704 * failure and jumping (longjmp).
705 *
706 * We end up here for a number of reasons:
707 * - pbInstrBuf isn't yet initialized.
708 * - Advancing beyond the buffer boundary (e.g. crossing a page).
709 * - Advancing beyond the CS segment limit.
710 * - Fetching from non-mappable page (e.g. MMIO).
711 *
712 * @param pVCpu The cross context virtual CPU structure of the
713 * calling thread.
714 * @param pvDst Where to return the bytes.
715 * @param cbDst Number of bytes to read.
716 *
717 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
718 */
719void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
720{
721#ifdef IN_RING3
722 for (;;)
723 {
724 Assert(cbDst <= 8);
725 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
726
727 /*
728 * We might have a partial buffer match, deal with that first to make the
729 * rest simpler. This is the first part of the cross page/buffer case.
730 */
731 if (pVCpu->iem.s.pbInstrBuf != NULL)
732 {
733 if (offBuf < pVCpu->iem.s.cbInstrBuf)
734 {
735 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
736 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
737 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
738
739 cbDst -= cbCopy;
740 pvDst = (uint8_t *)pvDst + cbCopy;
741 offBuf += cbCopy;
742 pVCpu->iem.s.offInstrNextByte += cbCopy; /* advance past the bytes just copied */
743 }
744 }
745
746 /*
747 * Check segment limit, figuring how much we're allowed to access at this point.
748 *
749 * We will fault immediately if RIP is past the segment limit / in non-canonical
750 * territory. If we do continue, there are one or more bytes to read before we
751 * end up in trouble and we need to do that first before faulting.
752 */
753 RTGCPTR GCPtrFirst;
754 uint32_t cbMaxRead;
755 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
756 {
757 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
758 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
759 { /* likely */ }
760 else
761 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
762 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
763 }
764 else
765 {
766 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
767 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); - this is allowed */
768 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
769 { /* likely */ }
770 else /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
771 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
772 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
773 if (cbMaxRead != 0)
774 { /* likely */ }
775 else
776 {
777 /* Overflowed because address is 0 and limit is max. */
778 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
779 cbMaxRead = X86_PAGE_SIZE;
780 }
781 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
782 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
783 if (cbMaxRead2 < cbMaxRead)
784 cbMaxRead = cbMaxRead2;
785 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
786 }
787
788 /*
789 * Get the TLB entry for this piece of code.
790 */
791 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
792 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
793 if (pTlbe->uTag == uTag)
794 {
795 /* likely when executing lots of code, otherwise unlikely */
796# ifdef VBOX_WITH_STATISTICS
797 pVCpu->iem.s.CodeTlb.cTlbHits++;
798# endif
799 }
800 else
801 {
802 pVCpu->iem.s.CodeTlb.cTlbMisses++;
803 PGMPTWALK Walk;
804 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
805 if (RT_FAILURE(rc))
806 {
807#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
808 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
809 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
810#endif
811 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
812 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
813 }
814
815 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
816 Assert(Walk.fSucceeded);
817 pTlbe->uTag = uTag;
818 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
819 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
820 pTlbe->GCPhys = Walk.GCPhys;
821 pTlbe->pbMappingR3 = NULL;
822 }
823
824 /*
825 * Check TLB page table level access flags.
826 */
827 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
828 {
829 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
830 {
831 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
832 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
833 }
834 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
835 {
836 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
837 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
838 }
839 }
840
841 /*
842 * Look up the physical page info if necessary.
843 */
844 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
845 { /* not necessary */ }
846 else
847 {
848 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
849 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
850 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
851 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
852 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
853 { /* likely */ }
854 else
855 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
856 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
857 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
858 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
859 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
860 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
861 }
862
863# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
864 /*
865 * Try to do a direct read using the pbMappingR3 pointer.
866 */
867 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
868 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
869 {
870 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
871 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
872 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
873 {
874 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
875 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
876 }
877 else
878 {
879 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
880 Assert(cbInstr < cbMaxRead);
881 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
882 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
883 }
884 if (cbDst <= cbMaxRead)
885 {
886 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
887 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
888 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
889 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
890 return;
891 }
892 pVCpu->iem.s.pbInstrBuf = NULL;
893
894 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
895 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
896 }
897 else
898# endif
899#if 0
900 /*
901 * If there is no special read handling, we can read a bit more and
902 * put it in the prefetch buffer.
903 */
904 if ( cbDst < cbMaxRead
905 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
906 {
907 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
908 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
909 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
910 { /* likely */ }
911 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
912 {
913 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
914 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
915 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
916 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
917 }
918 else
919 {
920 Log((RT_SUCCESS(rcStrict)
921 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
922 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
923 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
924 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
925 }
926 }
927 /*
928 * Special read handling, so only read exactly what's needed.
929 * This is a highly unlikely scenario.
930 */
931 else
932#endif
933 {
934 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
935 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
936 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
937 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
938 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
939 { /* likely */ }
940 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
941 {
942 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
943 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
944 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
945 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
946 }
947 else
948 {
949 Log((RT_SUCCESS(rcStrict)
950 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
951 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
952 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
953 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
954 }
955 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
956 if (cbToRead == cbDst)
957 return;
958 }
959
960 /*
961 * More to read, loop.
962 */
963 cbDst -= cbMaxRead;
964 pvDst = (uint8_t *)pvDst + cbMaxRead;
965 }
966#else
967 RT_NOREF(pvDst, cbDst);
968 if (pvDst || cbDst)
969 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
970#endif
971}
972
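/*
 * Note on fFlagsAndPhysRev (illustrative, not compiled): the field packs the
 * page-table access bits and the physical-mapping revision into one 64-bit
 * value, so the single masked compare used above both validates the cached
 * host mapping and checks the no-read / no-mapping bits in one go:
 */
#if 0
    bool const fDirectReadOk = (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
                            == pVCpu->iem.s.CodeTlb.uTlbPhysRev;
#endif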
973#else
974
975/**
976 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
977 * exception if it fails.
978 *
979 * @returns Strict VBox status code.
980 * @param pVCpu The cross context virtual CPU structure of the
981 * calling thread.
982 * @param cbMin The minimum number of bytes relative to offOpcode
983 * that must be read.
984 */
985VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
986{
987 /*
988 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
989 *
990 * First translate CS:rIP to a physical address.
991 */
992 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
993 uint32_t cbToTryRead;
994 RTGCPTR GCPtrNext;
995 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
996 {
997 cbToTryRead = GUEST_PAGE_SIZE;
998 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
999 if (!IEM_IS_CANONICAL(GCPtrNext))
1000 return iemRaiseGeneralProtectionFault0(pVCpu);
1001 }
1002 else
1003 {
1004 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1005 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); - this is allowed */
1006 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1007 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1008 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1009 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1010 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1011 if (!cbToTryRead) /* overflowed */
1012 {
1013 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1014 cbToTryRead = UINT32_MAX;
1015 /** @todo check out wrapping around the code segment. */
1016 }
1017 if (cbToTryRead < cbMin - cbLeft)
1018 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1019 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1020 }
1021
1022 /* Only read up to the end of the page, and make sure we don't read more
1023 than the opcode buffer can hold. */
1024 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1025 if (cbToTryRead > cbLeftOnPage)
1026 cbToTryRead = cbLeftOnPage;
1027 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1028 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1029/** @todo r=bird: Convert assertion into undefined opcode exception? */
1030 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1031
1032 PGMPTWALK Walk;
1033 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1034 if (RT_FAILURE(rc))
1035 {
1036 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1037#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1038 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1039 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1040#endif
1041 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1042 }
1043 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1044 {
1045 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1046#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1047 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1048 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1049#endif
1050 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1051 }
1052 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1053 {
1054 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1055#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1056 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1057 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1058#endif
1059 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1060 }
1061 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1062 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1063 /** @todo Check reserved bits and such stuff. PGM is better at doing
1064 * that, so do it when implementing the guest virtual address
1065 * TLB... */
1066
1067 /*
1068 * Read the bytes at this address.
1069 *
1070 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1071 * and since PATM should only patch the start of an instruction there
1072 * should be no need to check again here.
1073 */
1074 if (!pVCpu->iem.s.fBypassHandlers)
1075 {
1076 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1077 cbToTryRead, PGMACCESSORIGIN_IEM);
1078 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1079 { /* likely */ }
1080 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1081 {
1082 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1083 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1084 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1085 }
1086 else
1087 {
1088 Log((RT_SUCCESS(rcStrict)
1089 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1090 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1091 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1092 return rcStrict;
1093 }
1094 }
1095 else
1096 {
1097 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1098 if (RT_SUCCESS(rc))
1099 { /* likely */ }
1100 else
1101 {
1102 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1103 return rc;
1104 }
1105 }
1106 pVCpu->iem.s.cbOpcode += cbToTryRead;
1107 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1108
1109 return VINF_SUCCESS;
1110}
1111
1112#endif /* !IEM_WITH_CODE_TLB */
1113#ifndef IEM_WITH_SETJMP
1114
1115/**
1116 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1117 *
1118 * @returns Strict VBox status code.
1119 * @param pVCpu The cross context virtual CPU structure of the
1120 * calling thread.
1121 * @param pb Where to return the opcode byte.
1122 */
1123VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1124{
1125 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1126 if (rcStrict == VINF_SUCCESS)
1127 {
1128 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1129 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1130 pVCpu->iem.s.offOpcode = offOpcode + 1;
1131 }
1132 else
1133 *pb = 0;
1134 return rcStrict;
1135}
1136
1137#else /* IEM_WITH_SETJMP */
1138
1139/**
1140 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1141 *
1142 * @returns The opcode byte.
1143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1144 */
1145uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1146{
1147# ifdef IEM_WITH_CODE_TLB
1148 uint8_t u8;
1149 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1150 return u8;
1151# else
1152 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1153 if (rcStrict == VINF_SUCCESS)
1154 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1155 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1156# endif
1157}
1158
1159#endif /* IEM_WITH_SETJMP */
1160
1161#ifndef IEM_WITH_SETJMP
1162
1163/**
1164 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1165 *
1166 * @returns Strict VBox status code.
1167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1168 * @param pu16 Where to return the opcode word.
1169 */
1170VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1171{
1172 uint8_t u8;
1173 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1174 if (rcStrict == VINF_SUCCESS)
1175 *pu16 = (int8_t)u8;
1176 return rcStrict;
1177}
1178
1179
1180/**
1181 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1182 *
1183 * @returns Strict VBox status code.
1184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1185 * @param pu32 Where to return the opcode dword.
1186 */
1187VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1188{
1189 uint8_t u8;
1190 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1191 if (rcStrict == VINF_SUCCESS)
1192 *pu32 = (int8_t)u8;
1193 return rcStrict;
1194}
1195
1196
1197/**
1198 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1199 *
1200 * @returns Strict VBox status code.
1201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1202 * @param pu64 Where to return the opcode qword.
1203 */
1204VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1205{
1206 uint8_t u8;
1207 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1208 if (rcStrict == VINF_SUCCESS)
1209 *pu64 = (int8_t)u8;
1210 return rcStrict;
1211}
1212
1213#endif /* !IEM_WITH_SETJMP */
1214
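/*
 * The (int8_t) casts in the S8Sx helpers above rely on standard C sign
 * extension when converting to the wider destination type.  Worked example
 * (illustrative, not compiled):
 */
#if 0
    uint8_t const  u8  = 0xfe;        /* the opcode byte, i.e. -2 when viewed as signed */
    uint32_t const u32 = (int8_t)u8;  /* yields 0xfffffffe */
    uint64_t const u64 = (int8_t)u8;  /* yields 0xfffffffffffffffe */
#endif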
1215
1216#ifndef IEM_WITH_SETJMP
1217
1218/**
1219 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1220 *
1221 * @returns Strict VBox status code.
1222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1223 * @param pu16 Where to return the opcode word.
1224 */
1225VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1226{
1227 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1228 if (rcStrict == VINF_SUCCESS)
1229 {
1230 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1231# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1232 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1233# else
1234 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1235# endif
1236 pVCpu->iem.s.offOpcode = offOpcode + 2;
1237 }
1238 else
1239 *pu16 = 0;
1240 return rcStrict;
1241}
1242
1243#else /* IEM_WITH_SETJMP */
1244
1245/**
1246 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1247 *
1248 * @returns The opcode word.
1249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1250 */
1251uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1252{
1253# ifdef IEM_WITH_CODE_TLB
1254 uint16_t u16;
1255 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1256 return u16;
1257# else
1258 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1259 if (rcStrict == VINF_SUCCESS)
1260 {
1261 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1262 pVCpu->iem.s.offOpcode += 2;
1263# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1264 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1265# else
1266 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1267# endif
1268 }
1269 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1270# endif
1271}
1272
1273#endif /* IEM_WITH_SETJMP */
1274
1275#ifndef IEM_WITH_SETJMP
1276
1277/**
1278 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1279 *
1280 * @returns Strict VBox status code.
1281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1282 * @param pu32 Where to return the opcode double word.
1283 */
1284VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1285{
1286 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1287 if (rcStrict == VINF_SUCCESS)
1288 {
1289 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1290 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1291 pVCpu->iem.s.offOpcode = offOpcode + 2;
1292 }
1293 else
1294 *pu32 = 0;
1295 return rcStrict;
1296}
1297
1298
1299/**
1300 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1301 *
1302 * @returns Strict VBox status code.
1303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1304 * @param pu64 Where to return the opcode quad word.
1305 */
1306VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1307{
1308 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1309 if (rcStrict == VINF_SUCCESS)
1310 {
1311 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1312 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1313 pVCpu->iem.s.offOpcode = offOpcode + 2;
1314 }
1315 else
1316 *pu64 = 0;
1317 return rcStrict;
1318}
1319
1320#endif /* !IEM_WITH_SETJMP */
1321
1322#ifndef IEM_WITH_SETJMP
1323
1324/**
1325 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1326 *
1327 * @returns Strict VBox status code.
1328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1329 * @param pu32 Where to return the opcode dword.
1330 */
1331VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1332{
1333 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1334 if (rcStrict == VINF_SUCCESS)
1335 {
1336 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1337# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1338 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1339# else
1340 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1341 pVCpu->iem.s.abOpcode[offOpcode + 1],
1342 pVCpu->iem.s.abOpcode[offOpcode + 2],
1343 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1344# endif
1345 pVCpu->iem.s.offOpcode = offOpcode + 4;
1346 }
1347 else
1348 *pu32 = 0;
1349 return rcStrict;
1350}
1351
1352#else /* IEM_WITH_SETJMP */
1353
1354/**
1355 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1356 *
1357 * @returns The opcode dword.
1358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1359 */
1360uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1361{
1362# ifdef IEM_WITH_CODE_TLB
1363 uint32_t u32;
1364 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1365 return u32;
1366# else
1367 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1368 if (rcStrict == VINF_SUCCESS)
1369 {
1370 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1371 pVCpu->iem.s.offOpcode = offOpcode + 4;
1372# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1373 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1374# else
1375 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1376 pVCpu->iem.s.abOpcode[offOpcode + 1],
1377 pVCpu->iem.s.abOpcode[offOpcode + 2],
1378 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1379# endif
1380 }
1381 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1382# endif
1383}
1384
1385#endif /* IEM_WITH_SETJMP */
1386
1387#ifndef IEM_WITH_SETJMP
1388
1389/**
1390 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1391 *
1392 * @returns Strict VBox status code.
1393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1394 * @param pu64 Where to return the opcode dword.
1395 */
1396VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1397{
1398 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1399 if (rcStrict == VINF_SUCCESS)
1400 {
1401 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1402 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1403 pVCpu->iem.s.abOpcode[offOpcode + 1],
1404 pVCpu->iem.s.abOpcode[offOpcode + 2],
1405 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1406 pVCpu->iem.s.offOpcode = offOpcode + 4;
1407 }
1408 else
1409 *pu64 = 0;
1410 return rcStrict;
1411}
1412
1413
1414/**
1415 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1416 *
1417 * @returns Strict VBox status code.
1418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1419 * @param pu64 Where to return the opcode qword.
1420 */
1421VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1422{
1423 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1424 if (rcStrict == VINF_SUCCESS)
1425 {
1426 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1427 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1428 pVCpu->iem.s.abOpcode[offOpcode + 1],
1429 pVCpu->iem.s.abOpcode[offOpcode + 2],
1430 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1431 pVCpu->iem.s.offOpcode = offOpcode + 4;
1432 }
1433 else
1434 *pu64 = 0;
1435 return rcStrict;
1436}
1437
1438#endif /* !IEM_WITH_SETJMP */
1439
1440#ifndef IEM_WITH_SETJMP
1441
1442/**
1443 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1444 *
1445 * @returns Strict VBox status code.
1446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1447 * @param pu64 Where to return the opcode qword.
1448 */
1449VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1450{
1451 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1452 if (rcStrict == VINF_SUCCESS)
1453 {
1454 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1455# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1456 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1457# else
1458 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1459 pVCpu->iem.s.abOpcode[offOpcode + 1],
1460 pVCpu->iem.s.abOpcode[offOpcode + 2],
1461 pVCpu->iem.s.abOpcode[offOpcode + 3],
1462 pVCpu->iem.s.abOpcode[offOpcode + 4],
1463 pVCpu->iem.s.abOpcode[offOpcode + 5],
1464 pVCpu->iem.s.abOpcode[offOpcode + 6],
1465 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1466# endif
1467 pVCpu->iem.s.offOpcode = offOpcode + 8;
1468 }
1469 else
1470 *pu64 = 0;
1471 return rcStrict;
1472}
1473
1474#else /* IEM_WITH_SETJMP */
1475
1476/**
1477 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1478 *
1479 * @returns The opcode qword.
1480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1481 */
1482uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1483{
1484# ifdef IEM_WITH_CODE_TLB
1485 uint64_t u64;
1486 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1487 return u64;
1488# else
1489 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1490 if (rcStrict == VINF_SUCCESS)
1491 {
1492 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1493 pVCpu->iem.s.offOpcode = offOpcode + 8;
1494# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1495 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1496# else
1497 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1498 pVCpu->iem.s.abOpcode[offOpcode + 1],
1499 pVCpu->iem.s.abOpcode[offOpcode + 2],
1500 pVCpu->iem.s.abOpcode[offOpcode + 3],
1501 pVCpu->iem.s.abOpcode[offOpcode + 4],
1502 pVCpu->iem.s.abOpcode[offOpcode + 5],
1503 pVCpu->iem.s.abOpcode[offOpcode + 6],
1504 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1505# endif
1506 }
1507 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1508# endif
1509}
1510
1511#endif /* IEM_WITH_SETJMP */
1512
1513
1514
1515/** @name Misc Worker Functions.
1516 * @{
1517 */
1518
1519/**
1520 * Gets the exception class for the specified exception vector.
1521 *
1522 * @returns The class of the specified exception.
1523 * @param uVector The exception vector.
1524 */
1525static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1526{
1527 Assert(uVector <= X86_XCPT_LAST);
1528 switch (uVector)
1529 {
1530 case X86_XCPT_DE:
1531 case X86_XCPT_TS:
1532 case X86_XCPT_NP:
1533 case X86_XCPT_SS:
1534 case X86_XCPT_GP:
1535 case X86_XCPT_SX: /* AMD only */
1536 return IEMXCPTCLASS_CONTRIBUTORY;
1537
1538 case X86_XCPT_PF:
1539 case X86_XCPT_VE: /* Intel only */
1540 return IEMXCPTCLASS_PAGE_FAULT;
1541
1542 case X86_XCPT_DF:
1543 return IEMXCPTCLASS_DOUBLE_FAULT;
1544 }
1545 return IEMXCPTCLASS_BENIGN;
1546}
1547
1548
1549/**
1550 * Evaluates how to handle an exception caused during delivery of another event
1551 * (exception / interrupt).
1552 *
1553 * @returns How to handle the recursive exception.
1554 * @param pVCpu The cross context virtual CPU structure of the
1555 * calling thread.
1556 * @param fPrevFlags The flags of the previous event.
1557 * @param uPrevVector The vector of the previous event.
1558 * @param fCurFlags The flags of the current exception.
1559 * @param uCurVector The vector of the current exception.
1560 * @param pfXcptRaiseInfo Where to store additional information about the
1561 * exception condition. Optional.
1562 */
1563VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1564 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1565{
1566 /*
1567 * Only CPU exceptions can be raised while delivering other events; software interrupt
1568 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1569 */
1570 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1571 Assert(pVCpu); RT_NOREF(pVCpu);
1572 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1573
1574 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1575 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1576 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1577 {
1578 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1579 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1580 {
1581 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1582 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1583 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1584 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1585 {
1586 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1587 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1588 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1589 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1590 uCurVector, pVCpu->cpum.GstCtx.cr2));
1591 }
1592 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1593 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1594 {
1595 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1596 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1597 }
1598 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1599 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1600 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1601 {
1602 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1603 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1604 }
1605 }
1606 else
1607 {
1608 if (uPrevVector == X86_XCPT_NMI)
1609 {
1610 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1611 if (uCurVector == X86_XCPT_PF)
1612 {
1613 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1614 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1615 }
1616 }
1617 else if ( uPrevVector == X86_XCPT_AC
1618 && uCurVector == X86_XCPT_AC)
1619 {
1620 enmRaise = IEMXCPTRAISE_CPU_HANG;
1621 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1622 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1623 }
1624 }
1625 }
1626 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1627 {
1628 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1629 if (uCurVector == X86_XCPT_PF)
1630 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1631 }
1632 else
1633 {
1634 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1635 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1636 }
1637
1638 if (pfXcptRaiseInfo)
1639 *pfXcptRaiseInfo = fRaiseInfo;
1640 return enmRaise;
1641}
1642
1643
1644/**
1645 * Enters the CPU shutdown state initiated by a triple fault or other
1646 * unrecoverable conditions.
1647 *
1648 * @returns Strict VBox status code.
1649 * @param pVCpu The cross context virtual CPU structure of the
1650 * calling thread.
1651 */
1652static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1653{
1654 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1655 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1656
1657 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1658 {
1659 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1660 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1661 }
1662
1663 RT_NOREF(pVCpu);
1664 return VINF_EM_TRIPLE_FAULT;
1665}
1666
1667
1668/**
1669 * Validates a new SS segment.
1670 *
1671 * @returns VBox strict status code.
1672 * @param pVCpu The cross context virtual CPU structure of the
1673 * calling thread.
1674 * @param NewSS The new SS selector.
1675 * @param uCpl The CPL to load the stack for.
1676 * @param pDesc Where to return the descriptor.
1677 */
1678static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1679{
1680 /* Null selectors are not allowed (we're not called for dispatching
1681 interrupts with SS=0 in long mode). */
1682 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1683 {
1684 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1685 return iemRaiseTaskSwitchFault0(pVCpu);
1686 }
1687
1688 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1689 if ((NewSS & X86_SEL_RPL) != uCpl)
1690 {
1691 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1692 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1693 }
1694
1695 /*
1696 * Read the descriptor.
1697 */
1698 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1699 if (rcStrict != VINF_SUCCESS)
1700 return rcStrict;
1701
1702 /*
1703 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1704 */
1705 if (!pDesc->Legacy.Gen.u1DescType)
1706 {
1707 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1708 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1709 }
1710
1711 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1712 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1713 {
1714 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1715 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1716 }
1717 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1718 {
1719 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1720 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1721 }
1722
1723 /* Is it there? */
1724 /** @todo testcase: Is this checked before the canonical / limit check below? */
1725 if (!pDesc->Legacy.Gen.u1Present)
1726 {
1727 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1728 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1729 }
1730
1731 return VINF_SUCCESS;
1732}
1733
1734/** @} */
1735
1736
1737/** @name Raising Exceptions.
1738 *
1739 * @{
1740 */
1741
1742
1743/**
1744 * Loads the specified stack far pointer from the TSS.
1745 *
1746 * @returns VBox strict status code.
1747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1748 * @param uCpl The CPL to load the stack for.
1749 * @param pSelSS Where to return the new stack segment.
1750 * @param puEsp Where to return the new stack pointer.
1751 */
1752static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1753{
1754 VBOXSTRICTRC rcStrict;
1755 Assert(uCpl < 4);
1756
1757 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1758 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1759 {
1760 /*
1761 * 16-bit TSS (X86TSS16).
1762 */
1763 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1764 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1765 {
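            /* A 16-bit TSS stores the ring stacks as { sp, ss } word pairs starting at
               offset 2, so ring N's pair is at 2 + N * 4 and one 32-bit fetch returns
               sp in the low word and ss in the high word. */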
1766 uint32_t off = uCpl * 4 + 2;
1767 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1768 {
1769 /** @todo check actual access pattern here. */
1770 uint32_t u32Tmp = 0; /* gcc maybe... */
1771 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1772 if (rcStrict == VINF_SUCCESS)
1773 {
1774 *puEsp = RT_LOWORD(u32Tmp);
1775 *pSelSS = RT_HIWORD(u32Tmp);
1776 return VINF_SUCCESS;
1777 }
1778 }
1779 else
1780 {
1781 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1782 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1783 }
1784 break;
1785 }
1786
1787 /*
1788 * 32-bit TSS (X86TSS32).
1789 */
1790 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1791 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1792 {
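            /* A 32-bit TSS stores the ring stacks as { esp, ss } pairs of 8 bytes
               starting at offset 4, so ring N's pair is at 4 + N * 8 and one 64-bit
               fetch returns esp in the low dword and ss in bits 32..47. */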
1793 uint32_t off = uCpl * 8 + 4;
1794 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1795 {
1796/** @todo check actual access pattern here. */
1797 uint64_t u64Tmp;
1798 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1799 if (rcStrict == VINF_SUCCESS)
1800 {
1801 *puEsp = u64Tmp & UINT32_MAX;
1802 *pSelSS = (RTSEL)(u64Tmp >> 32);
1803 return VINF_SUCCESS;
1804 }
1805 }
1806 else
1807 {
1808 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1809 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1810 }
1811 break;
1812 }
1813
1814 default:
1815 AssertFailed();
1816 rcStrict = VERR_IEM_IPE_4;
1817 break;
1818 }
1819
1820 *puEsp = 0; /* make gcc happy */
1821 *pSelSS = 0; /* make gcc happy */
1822 return rcStrict;
1823}
1824
1825
1826/**
1827 * Loads the specified stack pointer from the 64-bit TSS.
1828 *
1829 * @returns VBox strict status code.
1830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1831 * @param uCpl The CPL to load the stack for.
1832 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1833 * @param puRsp Where to return the new stack pointer.
1834 */
1835static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1836{
1837 Assert(uCpl < 4);
1838 Assert(uIst < 8);
1839 *puRsp = 0; /* make gcc happy */
1840
1841 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1842 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1843
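    /* The 64-bit TSS keeps rsp0..rsp2 at offset 0x04 and ist1..ist7 at offset 0x24;
       a non-zero IST index selects the corresponding IST slot, otherwise the RSP for
       the target CPL is used. */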
1844 uint32_t off;
1845 if (uIst)
1846 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1847 else
1848 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1849 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1850 {
1851 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1852 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1853 }
1854
1855 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1856}
1857
1858
1859/**
1860 * Adjust the CPU state according to the exception being raised.
1861 *
1862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1863 * @param u8Vector The exception that has been raised.
1864 */
1865DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1866{
1867 switch (u8Vector)
1868 {
1869 case X86_XCPT_DB:
1870 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1871 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1872 break;
1873 /** @todo Read the AMD and Intel exception reference... */
1874 }
1875}
1876
1877
1878/**
1879 * Implements exceptions and interrupts for real mode.
1880 *
1881 * @returns VBox strict status code.
1882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1883 * @param cbInstr The number of bytes to offset rIP by in the return
1884 * address.
1885 * @param u8Vector The interrupt / exception vector number.
1886 * @param fFlags The flags.
1887 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1888 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1889 */
1890static VBOXSTRICTRC
1891iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1892 uint8_t cbInstr,
1893 uint8_t u8Vector,
1894 uint32_t fFlags,
1895 uint16_t uErr,
1896 uint64_t uCr2) RT_NOEXCEPT
1897{
1898 NOREF(uErr); NOREF(uCr2);
1899 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1900
1901 /*
1902 * Read the IDT entry.
1903 */
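    /* In real mode the IDT is the classic interrupt vector table: 256 entries of
       4 bytes, each a segment:offset far pointer, so vector N is read from
       IDTR.base + N * 4. */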
1904 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1905 {
1906 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1907 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1908 }
1909 RTFAR16 Idte;
1910 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1911 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1912 {
1913 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1914 return rcStrict;
1915 }
1916
1917 /*
1918 * Push the stack frame.
1919 */
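    /* The real mode frame is three words pushed in this order: FLAGS, CS, IP
       (6 bytes total) - exactly what IRET pops again. For software interrupts the
       saved IP points past the INT instruction. */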
1920 uint16_t *pu16Frame;
1921 uint64_t uNewRsp;
1922 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
1923 if (rcStrict != VINF_SUCCESS)
1924 return rcStrict;
1925
1926 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1927#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1928 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
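    /* The 8086/80186 class CPUs have no IOPL/NT fields and store FLAGS bits 12..15
       as ones, so mimic that for the older target CPUs. */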
1929 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1930 fEfl |= UINT16_C(0xf000);
1931#endif
1932 pu16Frame[2] = (uint16_t)fEfl;
1933 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1934 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1935 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1936 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1937 return rcStrict;
1938
1939 /*
1940 * Load the vector address into cs:ip and make exception specific state
1941 * adjustments.
1942 */
1943 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1944 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1945 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1946 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1947 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1948 pVCpu->cpum.GstCtx.rip = Idte.off;
1949 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1950 IEMMISC_SET_EFL(pVCpu, fEfl);
1951
1952 /** @todo do we actually do this in real mode? */
1953 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1954 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1955
1956 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1957}
1958
1959
1960/**
1961 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1962 *
1963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1964 * @param pSReg Pointer to the segment register.
1965 */
1966DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1967{
1968 pSReg->Sel = 0;
1969 pSReg->ValidSel = 0;
1970 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1971 {
1972 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
1973 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1974 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1975 }
1976 else
1977 {
1978 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1979 /** @todo check this on AMD-V */
1980 pSReg->u64Base = 0;
1981 pSReg->u32Limit = 0;
1982 }
1983}
1984
1985
1986/**
1987 * Loads a segment selector during a task switch in V8086 mode.
1988 *
1989 * @param pSReg Pointer to the segment register.
1990 * @param uSel The selector value to load.
1991 */
1992DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1993{
1994 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1995 pSReg->Sel = uSel;
1996 pSReg->ValidSel = uSel;
1997 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1998 pSReg->u64Base = uSel << 4;
1999 pSReg->u32Limit = 0xffff;
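    /* 0xf3 = present, DPL=3, read/write accessed data segment - the fixed attribute
       set all virtual-8086 mode segments have. */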
2000 pSReg->Attr.u = 0xf3;
2001}
2002
2003
2004/**
2005 * Loads a segment selector during a task switch in protected mode.
2006 *
2007 * In this task switch scenario, we would throw \#TS exceptions rather than
2008 * \#GPs.
2009 *
2010 * @returns VBox strict status code.
2011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2012 * @param pSReg Pointer to the segment register.
2013 * @param uSel The new selector value.
2014 *
2015 * @remarks This does _not_ handle CS or SS.
2016 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
2017 */
2018static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2019{
2020 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2021
2022 /* Null data selector. */
2023 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2024 {
2025 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2026 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2027 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2028 return VINF_SUCCESS;
2029 }
2030
2031 /* Fetch the descriptor. */
2032 IEMSELDESC Desc;
2033 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2034 if (rcStrict != VINF_SUCCESS)
2035 {
2036 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2037 VBOXSTRICTRC_VAL(rcStrict)));
2038 return rcStrict;
2039 }
2040
2041 /* Must be a data segment or readable code segment. */
2042 if ( !Desc.Legacy.Gen.u1DescType
2043 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2044 {
2045 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2046 Desc.Legacy.Gen.u4Type));
2047 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2048 }
2049
2050 /* Check privileges for data segments and non-conforming code segments. */
2051 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2052 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2053 {
2054 /* The RPL and the new CPL must be less than or equal to the DPL. */
2055 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2056 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2057 {
2058 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2059 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2060 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2061 }
2062 }
2063
2064 /* Is it there? */
2065 if (!Desc.Legacy.Gen.u1Present)
2066 {
2067 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2068 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2069 }
2070
2071 /* The base and limit. */
2072 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2073 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2074
2075 /*
2076 * Ok, everything checked out fine. Now set the accessed bit before
2077 * committing the result into the registers.
2078 */
2079 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2080 {
2081 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2082 if (rcStrict != VINF_SUCCESS)
2083 return rcStrict;
2084 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2085 }
2086
2087 /* Commit */
2088 pSReg->Sel = uSel;
2089 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2090 pSReg->u32Limit = cbLimit;
2091 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2092 pSReg->ValidSel = uSel;
2093 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2094 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2095 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2096
2097 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2098 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2099 return VINF_SUCCESS;
2100}
2101
2102
2103/**
2104 * Performs a task switch.
2105 *
2106 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2107 * caller is responsible for performing the necessary checks (like DPL, TSS
2108 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2109 * reference for JMP, CALL, IRET.
2110 *
2111 * If the task switch is due to a software interrupt or hardware exception,
2112 * the caller is responsible for validating the TSS selector and descriptor. See
2113 * Intel Instruction reference for INT n.
2114 *
2115 * @returns VBox strict status code.
2116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2117 * @param enmTaskSwitch The cause of the task switch.
2118 * @param uNextEip The EIP effective after the task switch.
2119 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2120 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2121 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2122 * @param SelTSS The TSS selector of the new task.
2123 * @param pNewDescTSS Pointer to the new TSS descriptor.
2124 */
2125VBOXSTRICTRC
2126iemTaskSwitch(PVMCPUCC pVCpu,
2127 IEMTASKSWITCH enmTaskSwitch,
2128 uint32_t uNextEip,
2129 uint32_t fFlags,
2130 uint16_t uErr,
2131 uint64_t uCr2,
2132 RTSEL SelTSS,
2133 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2134{
2135 Assert(!IEM_IS_REAL_MODE(pVCpu));
2136 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2137 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2138
2139 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2140 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2141 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2142 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2143 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2144
2145 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2146 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2147
2148 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2149 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2150
2151 /* Update CR2 in case it's a page-fault. */
2152 /** @todo This should probably be done much earlier in IEM/PGM. See
2153 * @bugref{5653#c49}. */
2154 if (fFlags & IEM_XCPT_FLAGS_CR2)
2155 pVCpu->cpum.GstCtx.cr2 = uCr2;
2156
2157 /*
2158 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2159 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2160 */
2161 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2162 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2163 if (uNewTSSLimit < uNewTSSLimitMin)
2164 {
2165 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2166 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2167 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2168 }
2169
2170 /*
2171 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2172 * The new TSS must have been read and validated (DPL, limits etc.) before a
2173 * task-switch VM-exit commences.
2174 *
2175 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2176 */
2177 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2178 {
2179 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2180 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2181 }
2182
2183 /*
2184 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2185 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2186 */
2187 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2188 {
2189 uint32_t const uExitInfo1 = SelTSS;
2190 uint32_t uExitInfo2 = uErr;
2191 switch (enmTaskSwitch)
2192 {
2193 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2194 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2195 default: break;
2196 }
2197 if (fFlags & IEM_XCPT_FLAGS_ERR)
2198 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2199 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2200 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2201
2202 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2203 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2204 RT_NOREF2(uExitInfo1, uExitInfo2);
2205 }
2206
2207 /*
2208 * Check the current TSS limit. The last written byte to the current TSS during the
2209 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2210 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2211 *
2212 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2213 * end up with smaller than "legal" TSS limits.
2214 */
2215 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2216 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2217 if (uCurTSSLimit < uCurTSSLimitMin)
2218 {
2219 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2220 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2221 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2222 }
2223
2224 /*
2225 * Verify that the new TSS can be accessed and map it. Map only the required contents
2226 * and not the entire TSS.
2227 */
2228 void *pvNewTSS;
2229 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2230 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2231 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2232 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2233 * not perform correct translation if this happens. See Intel spec. 7.2.1
2234 * "Task-State Segment". */
2235 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2236 if (rcStrict != VINF_SUCCESS)
2237 {
2238 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2239 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2240 return rcStrict;
2241 }
2242
2243 /*
2244 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2245 */
2246 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2247 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2248 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2249 {
2250 PX86DESC pDescCurTSS;
2251 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2252 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2253 if (rcStrict != VINF_SUCCESS)
2254 {
2255 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2256 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2257 return rcStrict;
2258 }
2259
2260 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2261 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2262 if (rcStrict != VINF_SUCCESS)
2263 {
2264 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2265 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2266 return rcStrict;
2267 }
2268
2269 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2270 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2271 {
2272 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2273 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2274 fEFlags &= ~X86_EFL_NT;
2275 }
2276 }
2277
2278 /*
2279 * Save the CPU state into the current TSS.
2280 */
2281 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2282 if (GCPtrNewTSS == GCPtrCurTSS)
2283 {
2284 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2285 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2286 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2287 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2288 pVCpu->cpum.GstCtx.ldtr.Sel));
2289 }
2290 if (fIsNewTSS386)
2291 {
2292 /*
2293 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2294 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2295 */
2296 void *pvCurTSS32;
2297 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2298 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2299 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2300 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2301 if (rcStrict != VINF_SUCCESS)
2302 {
2303 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2304 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2305 return rcStrict;
2306 }
2307
2308 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2309 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2310 pCurTSS32->eip = uNextEip;
2311 pCurTSS32->eflags = fEFlags;
2312 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2313 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2314 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2315 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2316 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2317 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2318 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2319 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2320 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2321 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2322 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2323 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2324 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2325 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2326
2327 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2328 if (rcStrict != VINF_SUCCESS)
2329 {
2330 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2331 VBOXSTRICTRC_VAL(rcStrict)));
2332 return rcStrict;
2333 }
2334 }
2335 else
2336 {
2337 /*
2338 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2339 */
2340 void *pvCurTSS16;
2341 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2342 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2343 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2344 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2345 if (rcStrict != VINF_SUCCESS)
2346 {
2347 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2348 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2349 return rcStrict;
2350 }
2351
2352 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2353 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2354 pCurTSS16->ip = uNextEip;
2355 pCurTSS16->flags = (uint16_t)fEFlags;
2356 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2357 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2358 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2359 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2360 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2361 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2362 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2363 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2364 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2365 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2366 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2367 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2368
2369 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2370 if (rcStrict != VINF_SUCCESS)
2371 {
2372 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2373 VBOXSTRICTRC_VAL(rcStrict)));
2374 return rcStrict;
2375 }
2376 }
2377
2378 /*
2379 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2380 */
2381 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2382 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2383 {
2384 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2385 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2386 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2387 }
2388
2389 /*
2390 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2391 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2392 */
2393 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2394 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2395 bool fNewDebugTrap;
2396 if (fIsNewTSS386)
2397 {
2398 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2399 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2400 uNewEip = pNewTSS32->eip;
2401 uNewEflags = pNewTSS32->eflags;
2402 uNewEax = pNewTSS32->eax;
2403 uNewEcx = pNewTSS32->ecx;
2404 uNewEdx = pNewTSS32->edx;
2405 uNewEbx = pNewTSS32->ebx;
2406 uNewEsp = pNewTSS32->esp;
2407 uNewEbp = pNewTSS32->ebp;
2408 uNewEsi = pNewTSS32->esi;
2409 uNewEdi = pNewTSS32->edi;
2410 uNewES = pNewTSS32->es;
2411 uNewCS = pNewTSS32->cs;
2412 uNewSS = pNewTSS32->ss;
2413 uNewDS = pNewTSS32->ds;
2414 uNewFS = pNewTSS32->fs;
2415 uNewGS = pNewTSS32->gs;
2416 uNewLdt = pNewTSS32->selLdt;
2417 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2418 }
2419 else
2420 {
2421 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2422 uNewCr3 = 0;
2423 uNewEip = pNewTSS16->ip;
2424 uNewEflags = pNewTSS16->flags;
2425 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2426 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2427 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2428 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2429 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2430 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2431 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2432 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2433 uNewES = pNewTSS16->es;
2434 uNewCS = pNewTSS16->cs;
2435 uNewSS = pNewTSS16->ss;
2436 uNewDS = pNewTSS16->ds;
2437 uNewFS = 0;
2438 uNewGS = 0;
2439 uNewLdt = pNewTSS16->selLdt;
2440 fNewDebugTrap = false;
2441 }
2442
2443 if (GCPtrNewTSS == GCPtrCurTSS)
2444 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2445 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2446
2447 /*
2448 * We're done accessing the new TSS.
2449 */
2450 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2451 if (rcStrict != VINF_SUCCESS)
2452 {
2453 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2454 return rcStrict;
2455 }
2456
2457 /*
2458 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2459 */
2460 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2461 {
2462 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2463 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2464 if (rcStrict != VINF_SUCCESS)
2465 {
2466 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2467 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2468 return rcStrict;
2469 }
2470
2471 /* Check that the descriptor indicates the new TSS is available (not busy). */
2472 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2473 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2474 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2475
2476 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2477 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2478 if (rcStrict != VINF_SUCCESS)
2479 {
2480 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2481 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2482 return rcStrict;
2483 }
2484 }
2485
2486 /*
2487 * From this point on, we're technically in the new task. We will defer exceptions
2488 * until the completion of the task switch but before executing any instructions in the new task.
2489 */
2490 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2491 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2492 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2493 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2494 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2495 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2496 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2497
2498 /* Set the busy bit in TR. */
2499 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2500
2501 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2502 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2503 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2504 {
2505 uNewEflags |= X86_EFL_NT;
2506 }
2507
2508 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2509 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2510 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2511
2512 pVCpu->cpum.GstCtx.eip = uNewEip;
2513 pVCpu->cpum.GstCtx.eax = uNewEax;
2514 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2515 pVCpu->cpum.GstCtx.edx = uNewEdx;
2516 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2517 pVCpu->cpum.GstCtx.esp = uNewEsp;
2518 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2519 pVCpu->cpum.GstCtx.esi = uNewEsi;
2520 pVCpu->cpum.GstCtx.edi = uNewEdi;
2521
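 /* Keep only the architecturally defined flag bits from the TSS image and force
    reserved bit 1, which always reads as one. */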
2522 uNewEflags &= X86_EFL_LIVE_MASK;
2523 uNewEflags |= X86_EFL_RA1_MASK;
2524 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2525
2526 /*
2527 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2528 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2529 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2530 */
2531 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2532 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2533
2534 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2535 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2536
2537 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2538 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2539
2540 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2541 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2542
2543 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2544 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2545
2546 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2547 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2548 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2549
2550 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2551 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2552 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2553 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2554
2555 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2556 {
2557 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2558 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2559 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2560 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2561 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2562 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2563 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2564 }
2565
2566 /*
2567 * Switch CR3 for the new task.
2568 */
2569 if ( fIsNewTSS386
2570 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2571 {
2572 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2573 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2574 AssertRCSuccessReturn(rc, rc);
2575
2576 /* Inform PGM. */
2577 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2578 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2579 AssertRCReturn(rc, rc);
2580 /* ignore informational status codes */
2581
2582 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2583 }
2584
2585 /*
2586 * Switch LDTR for the new task.
2587 */
2588 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2589 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2590 else
2591 {
2592 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2593
2594 IEMSELDESC DescNewLdt;
2595 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2596 if (rcStrict != VINF_SUCCESS)
2597 {
2598 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2599 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2600 return rcStrict;
2601 }
2602 if ( !DescNewLdt.Legacy.Gen.u1Present
2603 || DescNewLdt.Legacy.Gen.u1DescType
2604 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2605 {
2606 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2607 uNewLdt, DescNewLdt.Legacy.u));
2608 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2609 }
2610
2611 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2612 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2613 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2614 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2615 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2616 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2617 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2618 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2619 }
2620
2621 IEMSELDESC DescSS;
2622 if (IEM_IS_V86_MODE(pVCpu))
2623 {
2624 pVCpu->iem.s.uCpl = 3;
2625 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2626 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2627 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2628 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2629 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2630 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2631
2632 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2633 DescSS.Legacy.u = 0;
2634 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2635 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2636 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2637 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2638 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2639 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2640 DescSS.Legacy.Gen.u2Dpl = 3;
2641 }
2642 else
2643 {
2644 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2645
2646 /*
2647 * Load the stack segment for the new task.
2648 */
2649 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2650 {
2651 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2652 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2653 }
2654
2655 /* Fetch the descriptor. */
2656 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2657 if (rcStrict != VINF_SUCCESS)
2658 {
2659 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2660 VBOXSTRICTRC_VAL(rcStrict)));
2661 return rcStrict;
2662 }
2663
2664 /* SS must be a data segment and writable. */
2665 if ( !DescSS.Legacy.Gen.u1DescType
2666 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2667 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2668 {
2669 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2670 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2671 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2672 }
2673
2674 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2675 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2676 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2677 {
2678 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2679 uNewCpl));
2680 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2681 }
2682
2683 /* Is it there? */
2684 if (!DescSS.Legacy.Gen.u1Present)
2685 {
2686 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2687 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2688 }
2689
2690 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2691 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2692
2693 /* Set the accessed bit before committing the result into SS. */
2694 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2695 {
2696 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2697 if (rcStrict != VINF_SUCCESS)
2698 return rcStrict;
2699 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2700 }
2701
2702 /* Commit SS. */
2703 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2704 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2705 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2706 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2707 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2708 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2709 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2710
2711 /* CPL has changed, update IEM before loading rest of segments. */
2712 pVCpu->iem.s.uCpl = uNewCpl;
2713
2714 /*
2715 * Load the data segments for the new task.
2716 */
2717 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2718 if (rcStrict != VINF_SUCCESS)
2719 return rcStrict;
2720 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2721 if (rcStrict != VINF_SUCCESS)
2722 return rcStrict;
2723 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2724 if (rcStrict != VINF_SUCCESS)
2725 return rcStrict;
2726 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2727 if (rcStrict != VINF_SUCCESS)
2728 return rcStrict;
2729
2730 /*
2731 * Load the code segment for the new task.
2732 */
2733 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2734 {
2735 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2736 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2737 }
2738
2739 /* Fetch the descriptor. */
2740 IEMSELDESC DescCS;
2741 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2742 if (rcStrict != VINF_SUCCESS)
2743 {
2744 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2745 return rcStrict;
2746 }
2747
2748 /* CS must be a code segment. */
2749 if ( !DescCS.Legacy.Gen.u1DescType
2750 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2751 {
2752 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2753 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2754 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2755 }
2756
2757 /* For conforming CS, DPL must be less than or equal to the RPL. */
2758 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2759 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2760 {
2761 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2762 DescCS.Legacy.Gen.u2Dpl));
2763 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2764 }
2765
2766 /* For non-conforming CS, DPL must match RPL. */
2767 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2768 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2769 {
2770 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2771 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2772 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2773 }
2774
2775 /* Is it there? */
2776 if (!DescCS.Legacy.Gen.u1Present)
2777 {
2778 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2779 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2780 }
2781
2782 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2783 u64Base = X86DESC_BASE(&DescCS.Legacy);
2784
2785 /* Set the accessed bit before committing the result into CS. */
2786 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2787 {
2788 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2789 if (rcStrict != VINF_SUCCESS)
2790 return rcStrict;
2791 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2792 }
2793
2794 /* Commit CS. */
2795 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2796 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2797 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2798 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2799 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2800 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2801 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2802 }
2803
2804 /** @todo Debug trap. */
2805 if (fIsNewTSS386 && fNewDebugTrap)
2806 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2807
2808 /*
2809 * Construct the error code masks based on what caused this task switch.
2810 * See Intel Instruction reference for INT.
2811 */
2812 uint16_t uExt;
2813 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2814 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2815 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2816 {
2817 uExt = 1;
2818 }
2819 else
2820 uExt = 0;
2821
2822 /*
2823 * Push any error code on to the new stack.
2824 */
2825 if (fFlags & IEM_XCPT_FLAGS_ERR)
2826 {
2827 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2828 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
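        /* The error code is pushed as a dword on a 32-bit TSS and as a word on a
           16-bit one, hence the 4 vs. 2 byte frame size checked against the stack
           limits below. */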
2829 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2830
2831 /* Check that there is sufficient space on the stack. */
2832 /** @todo Factor out segment limit checking for normal/expand down segments
2833 * into a separate function. */
2834 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2835 {
2836 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2837 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2838 {
2839 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2840 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2841 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2842 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2843 }
2844 }
2845 else
2846 {
2847 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2848 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2849 {
2850 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2851 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2852 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2853 }
2854 }
2855
2856
2857 if (fIsNewTSS386)
2858 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2859 else
2860 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2861 if (rcStrict != VINF_SUCCESS)
2862 {
2863 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2864 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2865 return rcStrict;
2866 }
2867 }
2868
2869 /* Check the new EIP against the new CS limit. */
2870 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2871 {
2872 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2873 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2874 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2875 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2876 }
2877
2878 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2879 pVCpu->cpum.GstCtx.ss.Sel));
2880 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2881}
2882
2883
2884/**
2885 * Implements exceptions and interrupts for protected mode.
2886 *
2887 * @returns VBox strict status code.
2888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2889 * @param cbInstr The number of bytes to offset rIP by in the return
2890 * address.
2891 * @param u8Vector The interrupt / exception vector number.
2892 * @param fFlags The flags.
2893 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2894 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2895 */
2896static VBOXSTRICTRC
2897iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2898 uint8_t cbInstr,
2899 uint8_t u8Vector,
2900 uint32_t fFlags,
2901 uint16_t uErr,
2902 uint64_t uCr2) RT_NOEXCEPT
2903{
2904 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2905
2906 /*
2907 * Read the IDT entry.
2908 */
2909 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2910 {
2911 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2912 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2913 }
2914 X86DESC Idte;
2915 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2916 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2917 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2918 {
2919 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2920 return rcStrict;
2921 }
2922 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2923 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2924 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2925
2926 /*
2927 * Check the descriptor type, DPL and such.
2928 * ASSUMES this is done in the same order as described for call-gate calls.
2929 */
2930 if (Idte.Gate.u1DescType)
2931 {
2932 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2933 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2934 }
2935 bool fTaskGate = false;
2936 uint8_t f32BitGate = true;
2937 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
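    /* TF, NT, RF and VM are cleared for every gate type; interrupt gates additionally
       clear IF (added in the switch below), which is the only behavioural difference
       from trap gates. */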
2938 switch (Idte.Gate.u4Type)
2939 {
2940 case X86_SEL_TYPE_SYS_UNDEFINED:
2941 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2942 case X86_SEL_TYPE_SYS_LDT:
2943 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2944 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2945 case X86_SEL_TYPE_SYS_UNDEFINED2:
2946 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2947 case X86_SEL_TYPE_SYS_UNDEFINED3:
2948 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2949 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2950 case X86_SEL_TYPE_SYS_UNDEFINED4:
2951 {
2952 /** @todo check what actually happens when the type is wrong...
2953 * esp. call gates. */
2954 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2955 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2956 }
2957
2958 case X86_SEL_TYPE_SYS_286_INT_GATE:
2959 f32BitGate = false;
2960 RT_FALL_THRU();
2961 case X86_SEL_TYPE_SYS_386_INT_GATE:
2962 fEflToClear |= X86_EFL_IF;
2963 break;
2964
2965 case X86_SEL_TYPE_SYS_TASK_GATE:
2966 fTaskGate = true;
2967#ifndef IEM_IMPLEMENTS_TASKSWITCH
2968 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2969#endif
2970 break;
2971
2972 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2973 f32BitGate = false;
 RT_FALL_THRU();
2974 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2975 break;
2976
2977 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2978 }
2979
2980 /* Check DPL against CPL if applicable. */
2981 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2982 {
2983 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2984 {
2985 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2986 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2987 }
2988 }
2989
2990 /* Is it there? */
2991 if (!Idte.Gate.u1Present)
2992 {
2993 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2994 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2995 }
2996
2997 /* Is it a task-gate? */
2998 if (fTaskGate)
2999 {
3000 /*
3001 * Construct the error code masks based on what caused this task switch.
3002 * See Intel Instruction reference for INT.
3003 */
3004 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3005 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
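        /* uExt is 0 for INT n / INT3 / INTO and 1 for hardware interrupts, CPU
           exceptions and ICEBP (INT1); it is merged into the error codes below. */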
3006 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3007 RTSEL SelTSS = Idte.Gate.u16Sel;
3008
3009 /*
3010 * Fetch the TSS descriptor in the GDT.
3011 */
3012 IEMSELDESC DescTSS;
3013 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3014 if (rcStrict != VINF_SUCCESS)
3015 {
3016 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3017 VBOXSTRICTRC_VAL(rcStrict)));
3018 return rcStrict;
3019 }
3020
3021 /* The TSS descriptor must be a system segment and be available (not busy). */
3022 if ( DescTSS.Legacy.Gen.u1DescType
3023 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3024 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3025 {
3026 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3027 u8Vector, SelTSS, DescTSS.Legacy.au64));
3028 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3029 }
3030
3031 /* The TSS must be present. */
3032 if (!DescTSS.Legacy.Gen.u1Present)
3033 {
3034 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3035 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3036 }
3037
3038 /* Do the actual task switch. */
3039 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3040 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3041 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3042 }
3043
3044 /* A null CS is bad. */
3045 RTSEL NewCS = Idte.Gate.u16Sel;
3046 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3047 {
3048 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3049 return iemRaiseGeneralProtectionFault0(pVCpu);
3050 }
3051
3052 /* Fetch the descriptor for the new CS. */
3053 IEMSELDESC DescCS;
3054 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3055 if (rcStrict != VINF_SUCCESS)
3056 {
3057 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3058 return rcStrict;
3059 }
3060
3061 /* Must be a code segment. */
3062 if (!DescCS.Legacy.Gen.u1DescType)
3063 {
3064 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3065 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3066 }
3067 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3068 {
3069 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3070 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3071 }
3072
3073 /* Don't allow lowering the privilege level. */
3074 /** @todo Does the lowering of privileges apply to software interrupts
3075 * only? This has bearings on the more-privileged or
3076 * same-privilege stack behavior further down. A testcase would
3077 * be nice. */
3078 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3079 {
3080 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3081 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3082 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3083 }
3084
3085 /* Make sure the selector is present. */
3086 if (!DescCS.Legacy.Gen.u1Present)
3087 {
3088 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3089 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3090 }
3091
3092 /* Check the new EIP against the new CS limit. */
3093 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3094 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3095 ? Idte.Gate.u16OffsetLow
3096 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
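    /* (A 286 gate only carries a 16-bit offset; a 386 gate combines the low and high
       offset words into a 32-bit entry point.) */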
3097 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3098 if (uNewEip > cbLimitCS)
3099 {
3100 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3101 u8Vector, uNewEip, cbLimitCS, NewCS));
3102 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3103 }
3104 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3105
3106 /* Calc the flag image to push. */
3107 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3108 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3109 fEfl &= ~X86_EFL_RF;
3110 else
3111 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3112
3113 /* From V8086 mode only go to CPL 0. */
3114 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3115 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
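    /* For a conforming CS the CPL is left unchanged; for a non-conforming CS the
       handler runs at the DPL of the new code segment. */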
3116 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3117 {
3118 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3119 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3120 }
3121
3122 /*
3123 * If the privilege level changes, we need to get a new stack from the TSS.
3124 * This in turns means validating the new SS and ESP...
3125 */
3126 if (uNewCpl != pVCpu->iem.s.uCpl)
3127 {
3128 RTSEL NewSS;
3129 uint32_t uNewEsp;
3130 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3131 if (rcStrict != VINF_SUCCESS)
3132 return rcStrict;
3133
3134 IEMSELDESC DescSS;
3135 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3136 if (rcStrict != VINF_SUCCESS)
3137 return rcStrict;
3138 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3139 if (!DescSS.Legacy.Gen.u1DefBig)
3140 {
3141 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3142 uNewEsp = (uint16_t)uNewEsp;
3143 }
3144
3145 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3146
3147 /* Check that there is sufficient space for the stack frame. */
3148 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3149 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3150 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3151 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
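        /* The frame holds (E)IP, CS, (E)FLAGS, (E)SP and SS plus an optional error
           code (10/12 or 20/24 bytes); when interrupting V86 code GS, FS, DS and ES
           are pushed as well (18/20 or 36/40 bytes). */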
3152
3153 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3154 {
3155 if ( uNewEsp - 1 > cbLimitSS
3156 || uNewEsp < cbStackFrame)
3157 {
3158 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3159 u8Vector, NewSS, uNewEsp, cbStackFrame));
3160 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3161 }
3162 }
3163 else
3164 {
3165 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3166 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3167 {
3168 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3169 u8Vector, NewSS, uNewEsp, cbStackFrame));
3170 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3171 }
3172 }
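        /* For an expand-down SS the valid offsets lie strictly above the limit, up to
           0xffff or 0xffffffff depending on the D/B bit, which is what is checked above. */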
3173
3174 /*
3175 * Start making changes.
3176 */
3177
3178 /* Set the new CPL so that stack accesses use it. */
3179 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3180 pVCpu->iem.s.uCpl = uNewCpl;
3181
3182 /* Create the stack frame. */
3183 RTPTRUNION uStackFrame;
3184 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3185 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3186 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3187 if (rcStrict != VINF_SUCCESS)
3188 return rcStrict;
3189 void * const pvStackFrame = uStackFrame.pv;
3190 if (f32BitGate)
3191 {
3192 if (fFlags & IEM_XCPT_FLAGS_ERR)
3193 *uStackFrame.pu32++ = uErr;
3194 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3195 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3196 uStackFrame.pu32[2] = fEfl;
3197 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3198 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3199 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3200 if (fEfl & X86_EFL_VM)
3201 {
3202 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3203 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3204 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3205 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3206 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3207 }
3208 }
3209 else
3210 {
3211 if (fFlags & IEM_XCPT_FLAGS_ERR)
3212 *uStackFrame.pu16++ = uErr;
3213 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3214 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3215 uStackFrame.pu16[2] = fEfl;
3216 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3217 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3218 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3219 if (fEfl & X86_EFL_VM)
3220 {
3221 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3222 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3223 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3224 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3225 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3226 }
3227 }
3228 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3229 if (rcStrict != VINF_SUCCESS)
3230 return rcStrict;
3231
3232 /* Mark the selectors 'accessed' (hope this is the correct time). */
3233        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3234 * after pushing the stack frame? (Write protect the gdt + stack to
3235 * find out.) */
3236 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3237 {
3238 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3239 if (rcStrict != VINF_SUCCESS)
3240 return rcStrict;
3241 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3242 }
3243
3244 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3245 {
3246 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3247 if (rcStrict != VINF_SUCCESS)
3248 return rcStrict;
3249 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3250 }
3251
3252 /*
3253         * Start committing the register changes (joins with the DPL=CPL branch).
3254 */
3255 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3256 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3257 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3258 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3259 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3260 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3261 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3262 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3263 * SP is loaded).
3264 * Need to check the other combinations too:
3265 * - 16-bit TSS, 32-bit handler
3266 * - 32-bit TSS, 16-bit handler */
3267 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3268 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3269 else
3270 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3271
3272 if (fEfl & X86_EFL_VM)
3273 {
3274 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3275 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3276 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3277 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3278 }
3279 }
3280 /*
3281 * Same privilege, no stack change and smaller stack frame.
3282 */
3283 else
3284 {
3285 uint64_t uNewRsp;
3286 RTPTRUNION uStackFrame;
3287 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
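        /* Same-CPL frame: (E)IP, CS and (E)FLAGS plus an optional error code, i.e.
           6/8 bytes thru a 16-bit gate or 12/16 bytes thru a 32-bit gate. */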
3288 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3289 if (rcStrict != VINF_SUCCESS)
3290 return rcStrict;
3291 void * const pvStackFrame = uStackFrame.pv;
3292
3293 if (f32BitGate)
3294 {
3295 if (fFlags & IEM_XCPT_FLAGS_ERR)
3296 *uStackFrame.pu32++ = uErr;
3297 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3298 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3299 uStackFrame.pu32[2] = fEfl;
3300 }
3301 else
3302 {
3303 if (fFlags & IEM_XCPT_FLAGS_ERR)
3304 *uStackFrame.pu16++ = uErr;
3305 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3306 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3307 uStackFrame.pu16[2] = fEfl;
3308 }
3309 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3310 if (rcStrict != VINF_SUCCESS)
3311 return rcStrict;
3312
3313 /* Mark the CS selector as 'accessed'. */
3314 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3315 {
3316 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3317 if (rcStrict != VINF_SUCCESS)
3318 return rcStrict;
3319 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3320 }
3321
3322 /*
3323 * Start committing the register changes (joins with the other branch).
3324 */
3325 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3326 }
3327
3328 /* ... register committing continues. */
3329 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3330 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3331 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3332 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3333 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3334 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3335
3336 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3337 fEfl &= ~fEflToClear;
3338 IEMMISC_SET_EFL(pVCpu, fEfl);
3339
3340 if (fFlags & IEM_XCPT_FLAGS_CR2)
3341 pVCpu->cpum.GstCtx.cr2 = uCr2;
3342
3343 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3344 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3345
3346 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3347}
3348
3349
3350/**
3351 * Implements exceptions and interrupts for long mode.
3352 *
3353 * @returns VBox strict status code.
3354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3355 * @param cbInstr The number of bytes to offset rIP by in the return
3356 * address.
3357 * @param u8Vector The interrupt / exception vector number.
3358 * @param fFlags The flags.
3359 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3360 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3361 */
3362static VBOXSTRICTRC
3363iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3364 uint8_t cbInstr,
3365 uint8_t u8Vector,
3366 uint32_t fFlags,
3367 uint16_t uErr,
3368 uint64_t uCr2) RT_NOEXCEPT
3369{
3370 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3371
3372 /*
3373 * Read the IDT entry.
3374 */
3375 uint16_t offIdt = (uint16_t)u8Vector << 4;
3376 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3377 {
3378 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3379 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3380 }
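    /* Long mode IDT entries are 16 byte gate descriptors (hence u8Vector << 4 above);
       the descriptor is fetched below as two 8 byte reads. */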
3381 X86DESC64 Idte;
3382#ifdef _MSC_VER /* Shut up silly compiler warning. */
3383 Idte.au64[0] = 0;
3384 Idte.au64[1] = 0;
3385#endif
3386 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3387 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3388 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3389 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3390 {
3391 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3392 return rcStrict;
3393 }
3394 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3395 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3396 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3397
3398 /*
3399 * Check the descriptor type, DPL and such.
3400 * ASSUMES this is done in the same order as described for call-gate calls.
3401 */
3402 if (Idte.Gate.u1DescType)
3403 {
3404 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3405 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3406 }
3407 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3408 switch (Idte.Gate.u4Type)
3409 {
3410 case AMD64_SEL_TYPE_SYS_INT_GATE:
3411 fEflToClear |= X86_EFL_IF;
3412 break;
3413 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3414 break;
3415
3416 default:
3417 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3418 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3419 }
3420
3421 /* Check DPL against CPL if applicable. */
3422 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3423 {
3424 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3425 {
3426 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3427 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3428 }
3429 }
3430
3431 /* Is it there? */
3432 if (!Idte.Gate.u1Present)
3433 {
3434 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3435 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3436 }
3437
3438 /* A null CS is bad. */
3439 RTSEL NewCS = Idte.Gate.u16Sel;
3440 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3441 {
3442 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3443 return iemRaiseGeneralProtectionFault0(pVCpu);
3444 }
3445
3446 /* Fetch the descriptor for the new CS. */
3447 IEMSELDESC DescCS;
3448 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3449 if (rcStrict != VINF_SUCCESS)
3450 {
3451 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3452 return rcStrict;
3453 }
3454
3455 /* Must be a 64-bit code segment. */
3456 if (!DescCS.Long.Gen.u1DescType)
3457 {
3458 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3459 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3460 }
3461 if ( !DescCS.Long.Gen.u1Long
3462 || DescCS.Long.Gen.u1DefBig
3463 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3464 {
3465 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3466 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3467 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3468 }
3469
3470 /* Don't allow lowering the privilege level. For non-conforming CS
3471 selectors, the CS.DPL sets the privilege level the trap/interrupt
3472 handler runs at. For conforming CS selectors, the CPL remains
3473 unchanged, but the CS.DPL must be <= CPL. */
3474 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3475 * when CPU in Ring-0. Result \#GP? */
3476 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3477 {
3478 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3479 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3480 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3481 }
3482
3483
3484 /* Make sure the selector is present. */
3485 if (!DescCS.Legacy.Gen.u1Present)
3486 {
3487 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3488 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3489 }
3490
3491 /* Check that the new RIP is canonical. */
3492 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3493 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3494 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3495 if (!IEM_IS_CANONICAL(uNewRip))
3496 {
3497 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3498 return iemRaiseGeneralProtectionFault0(pVCpu);
3499 }
3500
3501 /*
3502 * If the privilege level changes or if the IST isn't zero, we need to get
3503 * a new stack from the TSS.
3504 */
3505 uint64_t uNewRsp;
3506 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3507 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3508 if ( uNewCpl != pVCpu->iem.s.uCpl
3509 || Idte.Gate.u3IST != 0)
3510 {
3511 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3512 if (rcStrict != VINF_SUCCESS)
3513 return rcStrict;
3514 }
3515 else
3516 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3517 uNewRsp &= ~(uint64_t)0xf;
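    /* The CPU aligns the stack down to a 16 byte boundary before pushing the 64-bit
       frame, whether or not the stack was switched (AMD64/Intel long mode rules). */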
3518
3519 /*
3520 * Calc the flag image to push.
3521 */
3522 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3523 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3524 fEfl &= ~X86_EFL_RF;
3525 else
3526 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3527
3528 /*
3529 * Start making changes.
3530 */
3531 /* Set the new CPL so that stack accesses use it. */
3532 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3533 pVCpu->iem.s.uCpl = uNewCpl;
3534
3535 /* Create the stack frame. */
3536 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
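    /* SS, RSP, RFLAGS, CS and RIP are always pushed as five 8 byte entries in long
       mode, plus an optional 8 byte error code: 40 or 48 bytes in total. */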
3537 RTPTRUNION uStackFrame;
3538 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3539 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3540 if (rcStrict != VINF_SUCCESS)
3541 return rcStrict;
3542 void * const pvStackFrame = uStackFrame.pv;
3543
3544 if (fFlags & IEM_XCPT_FLAGS_ERR)
3545 *uStackFrame.pu64++ = uErr;
3546 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3547 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3548 uStackFrame.pu64[2] = fEfl;
3549 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3550 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3551 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3552 if (rcStrict != VINF_SUCCESS)
3553 return rcStrict;
3554
3555    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3556    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3557 * after pushing the stack frame? (Write protect the gdt + stack to
3558 * find out.) */
3559 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3560 {
3561 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3562 if (rcStrict != VINF_SUCCESS)
3563 return rcStrict;
3564 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3565 }
3566
3567 /*
3568     * Start committing the register changes.
3569 */
3570 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3571 * hidden registers when interrupting 32-bit or 16-bit code! */
3572 if (uNewCpl != uOldCpl)
3573 {
3574 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3575 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3576 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3577 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3578 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3579 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3580 }
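    /* Note: on a long mode CPL change SS is loaded with a NULL selector whose RPL is
       the new CPL and marked unusable; 64-bit handlers do not use SS for addressing. */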
3581 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3582 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3583 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3584 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3585 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3586 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3587 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3588 pVCpu->cpum.GstCtx.rip = uNewRip;
3589
3590 fEfl &= ~fEflToClear;
3591 IEMMISC_SET_EFL(pVCpu, fEfl);
3592
3593 if (fFlags & IEM_XCPT_FLAGS_CR2)
3594 pVCpu->cpum.GstCtx.cr2 = uCr2;
3595
3596 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3597 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3598
3599 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3600}
3601
3602
3603/**
3604 * Implements exceptions and interrupts.
3605 *
3606 * All exceptions and interrupts go thru this function!
3607 *
3608 * @returns VBox strict status code.
3609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3610 * @param cbInstr The number of bytes to offset rIP by in the return
3611 * address.
3612 * @param u8Vector The interrupt / exception vector number.
3613 * @param fFlags The flags.
3614 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3615 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3616 */
3617VBOXSTRICTRC
3618iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3619 uint8_t cbInstr,
3620 uint8_t u8Vector,
3621 uint32_t fFlags,
3622 uint16_t uErr,
3623 uint64_t uCr2) RT_NOEXCEPT
3624{
3625 /*
3626 * Get all the state that we might need here.
3627 */
3628 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3629 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3630
3631#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3632 /*
3633 * Flush prefetch buffer
3634 */
3635 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3636#endif
3637
3638 /*
3639 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3640 */
3641 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3642 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3643 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3644 | IEM_XCPT_FLAGS_BP_INSTR
3645 | IEM_XCPT_FLAGS_ICEBP_INSTR
3646 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3647 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3648 {
3649 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3650 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3651 u8Vector = X86_XCPT_GP;
3652 uErr = 0;
3653 }
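    /* (Only INT n is IOPL-sensitive here: the flag test above excludes INT3, INTO and
       ICEBP, which are dispatched via the IDT regardless of IOPL.) */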
3654#ifdef DBGFTRACE_ENABLED
3655 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3656 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3657 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3658#endif
3659
3660 /*
3661 * Evaluate whether NMI blocking should be in effect.
3662 * Normally, NMI blocking is in effect whenever we inject an NMI.
3663 */
3664 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3665 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3666
3667#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3668 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3669 {
3670 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3671 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3672 return rcStrict0;
3673
3674 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3675 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3676 {
3677 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3678 fBlockNmi = false;
3679 }
3680 }
3681#endif
3682
3683#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3684 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3685 {
3686 /*
3687 * If the event is being injected as part of VMRUN, it isn't subject to event
3688 * intercepts in the nested-guest. However, secondary exceptions that occur
3689 * during injection of any event -are- subject to exception intercepts.
3690 *
3691 * See AMD spec. 15.20 "Event Injection".
3692 */
3693 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3694 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3695 else
3696 {
3697 /*
3698 * Check and handle if the event being raised is intercepted.
3699 */
3700 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3701 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3702 return rcStrict0;
3703 }
3704 }
3705#endif
3706
3707 /*
3708 * Set NMI blocking if necessary.
3709 */
3710 if (fBlockNmi)
3711 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3712
3713 /*
3714 * Do recursion accounting.
3715 */
3716 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3717 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3718 if (pVCpu->iem.s.cXcptRecursions == 0)
3719 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3720 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3721 else
3722 {
3723 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3724 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3725 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3726
3727 if (pVCpu->iem.s.cXcptRecursions >= 4)
3728 {
3729#ifdef DEBUG_bird
3730 AssertFailed();
3731#endif
3732 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3733 }
3734
3735 /*
3736 * Evaluate the sequence of recurring events.
3737 */
3738 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3739 NULL /* pXcptRaiseInfo */);
3740 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3741 { /* likely */ }
3742 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3743 {
3744 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3745 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3746 u8Vector = X86_XCPT_DF;
3747 uErr = 0;
3748#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3749 /* VMX nested-guest #DF intercept needs to be checked here. */
3750 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3751 {
3752 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3753 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3754 return rcStrict0;
3755 }
3756#endif
3757 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3758 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3759 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3760 }
3761 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3762 {
3763 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3764 return iemInitiateCpuShutdown(pVCpu);
3765 }
3766 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3767 {
3768 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3769 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3770 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3771 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3772 return VERR_EM_GUEST_CPU_HANG;
3773 }
3774 else
3775 {
3776 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3777 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3778 return VERR_IEM_IPE_9;
3779 }
3780
3781 /*
3782         * The 'EXT' bit is set when an exception occurs during delivery of an external
3783         * event (such as an interrupt or an earlier exception)[1]. The privileged software
3784         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3785         * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3786 *
3787 * [1] - Intel spec. 6.13 "Error Code"
3788 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3789 * [3] - Intel Instruction reference for INT n.
3790 */
3791 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3792 && (fFlags & IEM_XCPT_FLAGS_ERR)
3793 && u8Vector != X86_XCPT_PF
3794 && u8Vector != X86_XCPT_DF)
3795 {
3796 uErr |= X86_TRAP_ERR_EXTERNAL;
3797 }
3798 }
3799
3800 pVCpu->iem.s.cXcptRecursions++;
3801 pVCpu->iem.s.uCurXcpt = u8Vector;
3802 pVCpu->iem.s.fCurXcpt = fFlags;
3803 pVCpu->iem.s.uCurXcptErr = uErr;
3804 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3805
3806 /*
3807 * Extensive logging.
3808 */
3809#if defined(LOG_ENABLED) && defined(IN_RING3)
3810 if (LogIs3Enabled())
3811 {
3812 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3813 PVM pVM = pVCpu->CTX_SUFF(pVM);
3814 char szRegs[4096];
3815 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3816 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3817 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3818 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3819 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3820 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3821 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3822 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3823 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3824 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3825 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3826 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3827 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3828 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3829 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3830 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3831 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3832 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3833 " efer=%016VR{efer}\n"
3834 " pat=%016VR{pat}\n"
3835 " sf_mask=%016VR{sf_mask}\n"
3836 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3837 " lstar=%016VR{lstar}\n"
3838 " star=%016VR{star} cstar=%016VR{cstar}\n"
3839 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3840 );
3841
3842 char szInstr[256];
3843 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3844 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3845 szInstr, sizeof(szInstr), NULL);
3846 Log3(("%s%s\n", szRegs, szInstr));
3847 }
3848#endif /* LOG_ENABLED */
3849
3850 /*
3851 * Stats.
3852 */
3853 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3854 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
3855 else if (u8Vector <= X86_XCPT_LAST)
3856 {
3857 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
3858 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
3859 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
3860 }
3861
3862 /*
3863     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
3864 * to ensure that a stale TLB or paging cache entry will only cause one
3865 * spurious #PF.
3866 */
3867 if ( u8Vector == X86_XCPT_PF
3868 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
3869 IEMTlbInvalidatePage(pVCpu, uCr2);
3870
3871 /*
3872 * Call the mode specific worker function.
3873 */
3874 VBOXSTRICTRC rcStrict;
3875 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3876 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3877 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3878 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3879 else
3880 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3881
3882 /* Flush the prefetch buffer. */
3883#ifdef IEM_WITH_CODE_TLB
3884 pVCpu->iem.s.pbInstrBuf = NULL;
3885#else
3886 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3887#endif
3888
3889 /*
3890 * Unwind.
3891 */
3892 pVCpu->iem.s.cXcptRecursions--;
3893 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3894 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3895 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3896 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3897 pVCpu->iem.s.cXcptRecursions + 1));
3898 return rcStrict;
3899}
3900
3901#ifdef IEM_WITH_SETJMP
3902/**
3903 * See iemRaiseXcptOrInt. Will not return.
3904 */
3905DECL_NO_RETURN(void)
3906iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3907 uint8_t cbInstr,
3908 uint8_t u8Vector,
3909 uint32_t fFlags,
3910 uint16_t uErr,
3911 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
3912{
3913 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3914 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
3915}
3916#endif
3917
3918
3919/** \#DE - 00. */
3920VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3921{
3922 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3923}
3924
3925
3926/** \#DB - 01.
3927 * @note This automatically clears DR7.GD. */
3928VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3929{
3930 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
3931 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3932 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
3933}
3934
3935
3936/** \#BR - 05. */
3937VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3938{
3939 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3940}
3941
3942
3943/** \#UD - 06. */
3944VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3945{
3946 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3947}
3948
3949
3950/** \#NM - 07. */
3951VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3952{
3953 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3954}
3955
3956
3957/** \#TS(err) - 0a. */
3958VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3959{
3960 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3961}
3962
3963
3964/** \#TS(tr) - 0a. */
3965VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3966{
3967 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3968 pVCpu->cpum.GstCtx.tr.Sel, 0);
3969}
3970
3971
3972/** \#TS(0) - 0a. */
3973VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3974{
3975 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3976 0, 0);
3977}
3978
3979
3980/** \#TS(sel) - 0a. */
3981VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3982{
3983 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3984 uSel & X86_SEL_MASK_OFF_RPL, 0);
3985}
3986
3987
3988/** \#NP(err) - 0b. */
3989VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3990{
3991 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3992}
3993
3994
3995/** \#NP(sel) - 0b. */
3996VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3997{
3998 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3999 uSel & ~X86_SEL_RPL, 0);
4000}
4001
4002
4003/** \#SS(seg) - 0c. */
4004VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4005{
4006 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4007 uSel & ~X86_SEL_RPL, 0);
4008}
4009
4010
4011/** \#SS(err) - 0c. */
4012VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4013{
4014 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4015}
4016
4017
4018/** \#GP(n) - 0d. */
4019VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4020{
4021 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4022}
4023
4024
4025/** \#GP(0) - 0d. */
4026VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4027{
4028 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4029}
4030
4031#ifdef IEM_WITH_SETJMP
4032/** \#GP(0) - 0d. */
4033DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4034{
4035 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4036}
4037#endif
4038
4039
4040/** \#GP(sel) - 0d. */
4041VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4042{
4043 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4044 Sel & ~X86_SEL_RPL, 0);
4045}
4046
4047
4048/** \#GP(0) - 0d. */
4049VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4050{
4051 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4052}
4053
4054
4055/** \#GP(sel) - 0d. */
4056VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4057{
4058 NOREF(iSegReg); NOREF(fAccess);
4059 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4060 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4061}
4062
4063#ifdef IEM_WITH_SETJMP
4064/** \#GP(sel) - 0d, longjmp. */
4065DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4066{
4067 NOREF(iSegReg); NOREF(fAccess);
4068 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4069 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4070}
4071#endif
4072
4073/** \#GP(sel) - 0d. */
4074VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4075{
4076 NOREF(Sel);
4077 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4078}
4079
4080#ifdef IEM_WITH_SETJMP
4081/** \#GP(sel) - 0d, longjmp. */
4082DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4083{
4084 NOREF(Sel);
4085 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4086}
4087#endif
4088
4089
4090/** \#GP(sel) - 0d. */
4091VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4092{
4093 NOREF(iSegReg); NOREF(fAccess);
4094 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4095}
4096
4097#ifdef IEM_WITH_SETJMP
4098/** \#GP(sel) - 0d, longjmp. */
4099DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4100{
4101 NOREF(iSegReg); NOREF(fAccess);
4102 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4103}
4104#endif
4105
4106
4107/** \#PF(n) - 0e. */
4108VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4109{
4110 uint16_t uErr;
4111 switch (rc)
4112 {
4113 case VERR_PAGE_NOT_PRESENT:
4114 case VERR_PAGE_TABLE_NOT_PRESENT:
4115 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4116 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4117 uErr = 0;
4118 break;
4119
4120 default:
4121 AssertMsgFailed(("%Rrc\n", rc));
4122 RT_FALL_THRU();
4123 case VERR_ACCESS_DENIED:
4124 uErr = X86_TRAP_PF_P;
4125 break;
4126
4127 /** @todo reserved */
4128 }
4129
4130 if (pVCpu->iem.s.uCpl == 3)
4131 uErr |= X86_TRAP_PF_US;
4132
4133 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4134 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4135 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4136 uErr |= X86_TRAP_PF_ID;
4137
4138#if 0 /* This is so much non-sense, really. Why was it done like that? */
4139 /* Note! RW access callers reporting a WRITE protection fault, will clear
4140 the READ flag before calling. So, read-modify-write accesses (RW)
4141 can safely be reported as READ faults. */
4142 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4143 uErr |= X86_TRAP_PF_RW;
4144#else
4145 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4146 {
4147 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4148 /// (regardless of outcome of the comparison in the latter case).
4149 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4150 uErr |= X86_TRAP_PF_RW;
4151 }
4152#endif
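    /* The resulting error code thus carries P (bit 0), W/R (bit 1), U/S (bit 2) and
       I/D (bit 4); e.g. a user-mode write to a present page yields uErr = 0x7. */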
4153
4154 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4155 uErr, GCPtrWhere);
4156}
4157
4158#ifdef IEM_WITH_SETJMP
4159/** \#PF(n) - 0e, longjmp. */
4160DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4161{
4162 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4163}
4164#endif
4165
4166
4167/** \#MF(0) - 10. */
4168VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4169{
4170 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4171 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4172
4173 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4174 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4175 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4176}
4177
4178
4179/** \#AC(0) - 11. */
4180VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4181{
4182 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4183}
4184
4185#ifdef IEM_WITH_SETJMP
4186/** \#AC(0) - 11, longjmp. */
4187DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4188{
4189 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4190}
4191#endif
4192
4193
4194/** \#XF(0)/\#XM(0) - 19. */
4195VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4196{
4197 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4198}
4199
4200
4201/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4202IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4203{
4204 NOREF(cbInstr);
4205 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4206}
4207
4208
4209/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4210IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4211{
4212 NOREF(cbInstr);
4213 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4214}
4215
4216
4217/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4218IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4219{
4220 NOREF(cbInstr);
4221 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4222}
4223
4224
4225/** @} */
4226
4227/** @name Common opcode decoders.
4228 * @{
4229 */
4230//#include <iprt/mem.h>
4231
4232/**
4233 * Used to add extra details about a stub case.
4234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4235 */
4236void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4237{
4238#if defined(LOG_ENABLED) && defined(IN_RING3)
4239 PVM pVM = pVCpu->CTX_SUFF(pVM);
4240 char szRegs[4096];
4241 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4242 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4243 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4244 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4245 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4246 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4247 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4248 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4249 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4250 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4251 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4252 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4253 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4254 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4255 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4256 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4257 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4258 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4259 " efer=%016VR{efer}\n"
4260 " pat=%016VR{pat}\n"
4261 " sf_mask=%016VR{sf_mask}\n"
4262 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4263 " lstar=%016VR{lstar}\n"
4264 " star=%016VR{star} cstar=%016VR{cstar}\n"
4265 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4266 );
4267
4268 char szInstr[256];
4269 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4270 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4271 szInstr, sizeof(szInstr), NULL);
4272
4273 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4274#else
4275 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4276#endif
4277}
4278
4279/** @} */
4280
4281
4282
4283/** @name Register Access.
4284 * @{
4285 */
4286
4287/**
4288 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4289 *
4290 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4291 * segment limit.
4292 *
4293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4294 * @param cbInstr Instruction size.
4295 * @param offNextInstr The offset of the next instruction.
4296 * @param enmEffOpSize Effective operand size.
4297 */
4298VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4299 IEMMODE enmEffOpSize) RT_NOEXCEPT
4300{
4301 switch (enmEffOpSize)
4302 {
4303 case IEMMODE_16BIT:
4304 {
4305 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4306 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4307 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no CS limit checks in 64-bit mode */))
4308 pVCpu->cpum.GstCtx.rip = uNewIp;
4309 else
4310 return iemRaiseGeneralProtectionFault0(pVCpu);
4311 break;
4312 }
4313
4314 case IEMMODE_32BIT:
4315 {
4316 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4317 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4318
4319 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4320 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4321 pVCpu->cpum.GstCtx.rip = uNewEip;
4322 else
4323 return iemRaiseGeneralProtectionFault0(pVCpu);
4324 break;
4325 }
4326
4327 case IEMMODE_64BIT:
4328 {
4329 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4330
4331 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4332 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4333 pVCpu->cpum.GstCtx.rip = uNewRip;
4334 else
4335 return iemRaiseGeneralProtectionFault0(pVCpu);
4336 break;
4337 }
4338
4339 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4340 }
4341
4342#ifndef IEM_WITH_CODE_TLB
4343 /* Flush the prefetch buffer. */
4344 pVCpu->iem.s.cbOpcode = cbInstr;
4345#endif
4346
4347 /*
4348 * Clear RF and finish the instruction (maybe raise #DB).
4349 */
4350 return iemRegFinishClearingRF(pVCpu);
4351}
4352
4353
4354/**
4355 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4356 *
4357 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4358 * segment limit.
4359 *
4360 * @returns Strict VBox status code.
4361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4362 * @param cbInstr Instruction size.
4363 * @param offNextInstr The offset of the next instruction.
4364 */
4365VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4366{
4367 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4368
4369 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4370 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4371 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no limit checking in 64-bit mode */))
4372 pVCpu->cpum.GstCtx.rip = uNewIp;
4373 else
4374 return iemRaiseGeneralProtectionFault0(pVCpu);
4375
4376#ifndef IEM_WITH_CODE_TLB
4377 /* Flush the prefetch buffer. */
4378 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4379#endif
4380
4381 /*
4382 * Clear RF and finish the instruction (maybe raise #DB).
4383 */
4384 return iemRegFinishClearingRF(pVCpu);
4385}
4386
4387
4388/**
4389 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4390 *
4391 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4392 * segment limit.
4393 *
4394 * @returns Strict VBox status code.
4395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4396 * @param cbInstr Instruction size.
4397 * @param offNextInstr The offset of the next instruction.
4398 * @param enmEffOpSize Effective operand size.
4399 */
4400VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4401 IEMMODE enmEffOpSize) RT_NOEXCEPT
4402{
4403 if (enmEffOpSize == IEMMODE_32BIT)
4404 {
4405 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4406
4407 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4408 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4409 pVCpu->cpum.GstCtx.rip = uNewEip;
4410 else
4411 return iemRaiseGeneralProtectionFault0(pVCpu);
4412 }
4413 else
4414 {
4415 Assert(enmEffOpSize == IEMMODE_64BIT);
4416
4417 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4418 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4419 pVCpu->cpum.GstCtx.rip = uNewRip;
4420 else
4421 return iemRaiseGeneralProtectionFault0(pVCpu);
4422 }
4423
4424#ifndef IEM_WITH_CODE_TLB
4425 /* Flush the prefetch buffer. */
4426 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4427#endif
4428
4429 /*
4430 * Clear RF and finish the instruction (maybe raise #DB).
4431 */
4432 return iemRegFinishClearingRF(pVCpu);
4433}
4434
4435
4436/**
4437 * Performs a near jump to the specified address.
4438 *
4439 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4440 *
4441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4442 * @param uNewIp The new IP value.
4443 */
4444VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4445{
4446 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4447 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no limit checks in 64-bit mode */))
4448 pVCpu->cpum.GstCtx.rip = uNewIp;
4449 else
4450 return iemRaiseGeneralProtectionFault0(pVCpu);
4451 /** @todo Test 16-bit jump in 64-bit mode. */
4452
4453#ifndef IEM_WITH_CODE_TLB
4454 /* Flush the prefetch buffer. */
4455 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4456#endif
4457
4458 /*
4459 * Clear RF and finish the instruction (maybe raise #DB).
4460 */
4461 return iemRegFinishClearingRF(pVCpu);
4462}
4463
4464
4465/**
4466 * Performs a near jump to the specified address.
4467 *
4468 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4469 *
4470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4471 * @param uNewEip The new EIP value.
4472 */
4473VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4474{
4475 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4476 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4477
4478 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4479 pVCpu->cpum.GstCtx.rip = uNewEip;
4480 else
4481 return iemRaiseGeneralProtectionFault0(pVCpu);
4482
4483#ifndef IEM_WITH_CODE_TLB
4484 /* Flush the prefetch buffer. */
4485 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4486#endif
4487
4488 /*
4489 * Clear RF and finish the instruction (maybe raise #DB).
4490 */
4491 return iemRegFinishClearingRF(pVCpu);
4492}
4493
4494
4495/**
4496 * Performs a near jump to the specified address.
4497 *
4498 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4499 * segment limit.
4500 *
4501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4502 * @param uNewRip The new RIP value.
4503 */
4504VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4505{
4506 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4507
4508 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4509 pVCpu->cpum.GstCtx.rip = uNewRip;
4510 else
4511 return iemRaiseGeneralProtectionFault0(pVCpu);
4512
4513#ifndef IEM_WITH_CODE_TLB
4514 /* Flush the prefetch buffer. */
4515 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4516#endif
4517
4518 /*
4519 * Clear RF and finish the instruction (maybe raise #DB).
4520 */
4521 return iemRegFinishClearingRF(pVCpu);
4522}
4523
4524/** @} */
4525
4526
4527/** @name FPU access and helpers.
4528 *
4529 * @{
4530 */
4531
4532/**
4533 * Updates the x87.DS and FPUDP registers.
4534 *
4535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4536 * @param pFpuCtx The FPU context.
4537 * @param iEffSeg The effective segment register.
4538 * @param GCPtrEff The effective address relative to @a iEffSeg.
4539 */
4540DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4541{
4542 RTSEL sel;
4543 switch (iEffSeg)
4544 {
4545 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4546 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4547 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4548 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4549 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4550 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4551 default:
4552 AssertMsgFailed(("%d\n", iEffSeg));
4553 sel = pVCpu->cpum.GstCtx.ds.Sel;
4554 }
4555 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
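/* Illustrative summary of the three encodings handled below: in real/V86 mode
   FPUDP gets the linear address (sel * 16 + offset, e.g. sel=0x1234, off=0x0010
   -> 0x12350) and DS is zeroed; in protected mode FPUDP holds the 32-bit offset
   and DS the selector; in long mode the full 64-bit offset is written across the
   FPUDP/DS fields (the 64-bit fxsave image). */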
4556 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4557 {
4558 pFpuCtx->DS = 0;
4559 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4560 }
4561 else if (!IEM_IS_LONG_MODE(pVCpu))
4562 {
4563 pFpuCtx->DS = sel;
4564 pFpuCtx->FPUDP = GCPtrEff;
4565 }
4566 else
4567 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4568}
4569
4570
4571/**
4572 * Rotates the stack registers in the push direction.
4573 *
4574 * @param pFpuCtx The FPU context.
4575 * @remarks This is a complete waste of time, but fxsave stores the registers in
4576 * stack order.
4577 */
4578DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4579{
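/* aRegs[] is kept in ST-relative order; e.g. after this rotation the old ST(0)
   value sits in aRegs[1], old ST(1) in aRegs[2], ..., while aRegs[0] (the old
   ST(7), which must be empty) is about to receive the pushed value. */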
4580 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4581 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4582 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4583 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4584 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4585 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4586 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4587 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4588 pFpuCtx->aRegs[0].r80 = r80Tmp;
4589}
4590
4591
4592/**
4593 * Rotates the stack registers in the pop direction.
4594 *
4595 * @param pFpuCtx The FPU context.
4596 * @remarks This is a complete waste of time, but fxsave stores the registers in
4597 * stack order.
4598 */
4599DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4600{
4601 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4602 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4603 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4604 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4605 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4606 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4607 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4608 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4609 pFpuCtx->aRegs[7].r80 = r80Tmp;
4610}
4611
4612
4613/**
4614 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4615 * exception prevents it.
4616 *
4617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4618 * @param pResult The FPU operation result to push.
4619 * @param pFpuCtx The FPU context.
4620 */
4621static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4622{
4623 /* Update FSW and bail if there are pending exceptions afterwards. */
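/* Note: IE/DE/ZE in FSW occupy the same bit positions as IM/DM/ZM in FCW, so the
   single AND with the inverted mask bits below tests for flag-set-while-unmasked;
   e.g. FSW.IE=1 with FCW.IM=0 means an unmasked #I is pending and nothing gets
   pushed. */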
4624 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4625 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4626 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4627 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4628 {
4629 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4630 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4631 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4632 pFpuCtx->FSW = fFsw;
4633 return;
4634 }
4635
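/* Adding 7 modulo the 3-bit TOP field is a decrement, e.g. TOP=0 -> 7 and
   TOP=3 -> 2; this is the physical register the pushed value will occupy. */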
4636 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4637 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4638 {
4639 /* All is fine, push the actual value. */
4640 pFpuCtx->FTW |= RT_BIT(iNewTop);
4641 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4642 }
4643 else if (pFpuCtx->FCW & X86_FCW_IM)
4644 {
4645 /* Masked stack overflow, push QNaN. */
4646 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4647 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4648 }
4649 else
4650 {
4651 /* Raise stack overflow, don't push anything. */
4652 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4653 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4654 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4655 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4656 return;
4657 }
4658
4659 fFsw &= ~X86_FSW_TOP_MASK;
4660 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4661 pFpuCtx->FSW = fFsw;
4662
4663 iemFpuRotateStackPush(pFpuCtx);
4664 RT_NOREF(pVCpu);
4665}
4666
4667
4668/**
4669 * Stores a result in a FPU register and updates the FSW and FTW.
4670 *
4671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4672 * @param pFpuCtx The FPU context.
4673 * @param pResult The result to store.
4674 * @param iStReg Which FPU register to store it in.
4675 */
4676static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4677{
4678 Assert(iStReg < 8);
4679 uint16_t fNewFsw = pFpuCtx->FSW;
4680 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4681 fNewFsw &= ~X86_FSW_C_MASK;
4682 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4683 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4684 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4685 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4686 pFpuCtx->FSW = fNewFsw;
4687 pFpuCtx->FTW |= RT_BIT(iReg);
4688 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4689 RT_NOREF(pVCpu);
4690}
4691
4692
4693/**
4694 * Only updates the FPU status word (FSW) with the result of the current
4695 * instruction.
4696 *
4697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4698 * @param pFpuCtx The FPU context.
4699 * @param u16FSW The FSW output of the current instruction.
4700 */
4701static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4702{
4703 uint16_t fNewFsw = pFpuCtx->FSW;
4704 fNewFsw &= ~X86_FSW_C_MASK;
4705 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4706 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4707 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4708 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4709 pFpuCtx->FSW = fNewFsw;
4710 RT_NOREF(pVCpu);
4711}
4712
4713
4714/**
4715 * Pops one item off the FPU stack if no pending exception prevents it.
4716 *
4717 * @param pFpuCtx The FPU context.
4718 */
4719static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4720{
4721 /* Check pending exceptions. */
4722 uint16_t uFSW = pFpuCtx->FSW;
4723 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4724 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4725 return;
4726
4727 /* TOP = TOP + 1 (pop). */
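/* 9 is 8 + 1; the 8 falls outside the 3-bit TOP field once masked, so this is
   simply TOP+1 modulo 8, e.g. TOP=7 wraps around to 0. */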
4728 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4729 uFSW &= ~X86_FSW_TOP_MASK;
4730 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4731 pFpuCtx->FSW = uFSW;
4732
4733 /* Mark the previous ST0 as empty. */
4734 iOldTop >>= X86_FSW_TOP_SHIFT;
4735 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4736
4737 /* Rotate the registers. */
4738 iemFpuRotateStackPop(pFpuCtx);
4739}
4740
4741
4742/**
4743 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4744 *
4745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4746 * @param pResult The FPU operation result to push.
4747 */
4748void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4749{
4750 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4751 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4752 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4753}
4754
4755
4756/**
4757 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4758 * and sets FPUDP and FPUDS.
4759 *
4760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4761 * @param pResult The FPU operation result to push.
4762 * @param iEffSeg The effective segment register.
4763 * @param GCPtrEff The effective address relative to @a iEffSeg.
4764 */
4765void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4766{
4767 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4768 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4769 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4770 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4771}
4772
4773
4774/**
4775 * Replace ST0 with the first value and push the second onto the FPU stack,
4776 * unless a pending exception prevents it.
4777 *
4778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4779 * @param pResult The FPU operation result to store and push.
4780 */
4781void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4782{
4783 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4784 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4785
4786 /* Update FSW and bail if there are pending exceptions afterwards. */
4787 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4788 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4789 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4790 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4791 {
4792 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4793 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4794 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4795 pFpuCtx->FSW = fFsw;
4796 return;
4797 }
4798
4799 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4800 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4801 {
4802 /* All is fine, push the actual value. */
4803 pFpuCtx->FTW |= RT_BIT(iNewTop);
4804 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4805 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4806 }
4807 else if (pFpuCtx->FCW & X86_FCW_IM)
4808 {
4809 /* Masked stack overflow, push QNaN. */
4810 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4811 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4812 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4813 }
4814 else
4815 {
4816 /* Raise stack overflow, don't push anything. */
4817 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4818 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4819 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4820 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4821 return;
4822 }
4823
4824 fFsw &= ~X86_FSW_TOP_MASK;
4825 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4826 pFpuCtx->FSW = fFsw;
4827
4828 iemFpuRotateStackPush(pFpuCtx);
4829}
4830
4831
4832/**
4833 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4834 * FOP.
4835 *
4836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4837 * @param pResult The result to store.
4838 * @param iStReg Which FPU register to store it in.
4839 */
4840void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4841{
4842 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4843 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4844 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4845}
4846
4847
4848/**
4849 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4850 * FOP, and then pops the stack.
4851 *
4852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4853 * @param pResult The result to store.
4854 * @param iStReg Which FPU register to store it in.
4855 */
4856void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4857{
4858 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4859 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4860 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4861 iemFpuMaybePopOne(pFpuCtx);
4862}
4863
4864
4865/**
4866 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4867 * FPUDP, and FPUDS.
4868 *
4869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4870 * @param pResult The result to store.
4871 * @param iStReg Which FPU register to store it in.
4872 * @param iEffSeg The effective memory operand selector register.
4873 * @param GCPtrEff The effective memory operand offset.
4874 */
4875void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4876 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4877{
4878 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4879 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4880 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4881 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4882}
4883
4884
4885/**
4886 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4887 * FPUDP, and FPUDS, and then pops the stack.
4888 *
4889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4890 * @param pResult The result to store.
4891 * @param iStReg Which FPU register to store it in.
4892 * @param iEffSeg The effective memory operand selector register.
4893 * @param GCPtrEff The effective memory operand offset.
4894 */
4895void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4896 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4897{
4898 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4899 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4900 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4901 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4902 iemFpuMaybePopOne(pFpuCtx);
4903}
4904
4905
4906/**
4907 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4908 *
4909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4910 */
4911void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4912{
4913 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4914 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4915}
4916
4917
4918/**
4919 * Updates the FSW, FOP, FPUIP, and FPUCS.
4920 *
4921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4922 * @param u16FSW The FSW from the current instruction.
4923 */
4924void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4925{
4926 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4927 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4928 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4929}
4930
4931
4932/**
4933 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4934 *
4935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4936 * @param u16FSW The FSW from the current instruction.
4937 */
4938void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4939{
4940 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4941 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4942 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4943 iemFpuMaybePopOne(pFpuCtx);
4944}
4945
4946
4947/**
4948 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4949 *
4950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4951 * @param u16FSW The FSW from the current instruction.
4952 * @param iEffSeg The effective memory operand selector register.
4953 * @param GCPtrEff The effective memory operand offset.
4954 */
4955void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4956{
4957 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4958 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4959 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4960 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4961}
4962
4963
4964/**
4965 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4966 *
4967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4968 * @param u16FSW The FSW from the current instruction.
4969 */
4970void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4971{
4972 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4973 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4974 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4975 iemFpuMaybePopOne(pFpuCtx);
4976 iemFpuMaybePopOne(pFpuCtx);
4977}
4978
4979
4980/**
4981 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4982 *
4983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4984 * @param u16FSW The FSW from the current instruction.
4985 * @param iEffSeg The effective memory operand selector register.
4986 * @param GCPtrEff The effective memory operand offset.
4987 */
4988void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4989{
4990 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4991 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4992 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4993 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4994 iemFpuMaybePopOne(pFpuCtx);
4995}
4996
4997
4998/**
4999 * Worker routine for raising an FPU stack underflow exception.
5000 *
5001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5002 * @param pFpuCtx The FPU context.
5003 * @param iStReg The stack register being accessed.
5004 */
5005static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5006{
5007 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5008 if (pFpuCtx->FCW & X86_FCW_IM)
5009 {
5010 /* Masked underflow. */
5011 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5012 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5013 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5014 if (iStReg != UINT8_MAX)
5015 {
5016 pFpuCtx->FTW |= RT_BIT(iReg);
5017 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5018 }
5019 }
5020 else
5021 {
5022 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5023 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5024 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5025 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5026 }
5027 RT_NOREF(pVCpu);
5028}
5029
5030
5031/**
5032 * Raises a FPU stack underflow exception.
5033 *
5034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5035 * @param iStReg The destination register that should be loaded
5036 * with QNaN if \#IS is not masked. Specify
5037 * UINT8_MAX if none (like for fcom).
5038 */
5039void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5040{
5041 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5042 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5043 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5044}
5045
5046
5047void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5048{
5049 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5050 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5051 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5052 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5053}
5054
5055
5056void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5057{
5058 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5059 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5060 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5061 iemFpuMaybePopOne(pFpuCtx);
5062}
5063
5064
5065void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5066{
5067 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5068 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5069 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5070 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5071 iemFpuMaybePopOne(pFpuCtx);
5072}
5073
5074
5075void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5076{
5077 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5078 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5079 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5080 iemFpuMaybePopOne(pFpuCtx);
5081 iemFpuMaybePopOne(pFpuCtx);
5082}
5083
5084
5085void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5086{
5087 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5088 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5089
5090 if (pFpuCtx->FCW & X86_FCW_IM)
5091 {
5092 /* Masked underflow - Push QNaN. */
5093 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5094 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5095 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5096 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5097 pFpuCtx->FTW |= RT_BIT(iNewTop);
5098 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5099 iemFpuRotateStackPush(pFpuCtx);
5100 }
5101 else
5102 {
5103 /* Exception pending - don't change TOP or the register stack. */
5104 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5105 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5106 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5107 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5108 }
5109}
5110
5111
5112void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5113{
5114 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5115 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5116
5117 if (pFpuCtx->FCW & X86_FCW_IM)
5118 {
5119 /* Masked underflow - Push QNaN. */
5120 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5121 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5122 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5123 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5124 pFpuCtx->FTW |= RT_BIT(iNewTop);
5125 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5126 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5127 iemFpuRotateStackPush(pFpuCtx);
5128 }
5129 else
5130 {
5131 /* Exception pending - don't change TOP or the register stack. */
5132 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5133 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5134 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5135 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5136 }
5137}
5138
5139
5140/**
5141 * Worker routine for raising an FPU stack overflow exception on a push.
5142 *
5143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5144 * @param pFpuCtx The FPU context.
5145 */
5146static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5147{
5148 if (pFpuCtx->FCW & X86_FCW_IM)
5149 {
5150 /* Masked overflow. */
5151 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5152 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5153 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5154 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5155 pFpuCtx->FTW |= RT_BIT(iNewTop);
5156 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5157 iemFpuRotateStackPush(pFpuCtx);
5158 }
5159 else
5160 {
5161 /* Exception pending - don't change TOP or the register stack. */
5162 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5163 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5164 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5165 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5166 }
5167 RT_NOREF(pVCpu);
5168}
5169
5170
5171/**
5172 * Raises a FPU stack overflow exception on a push.
5173 *
5174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5175 */
5176void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5177{
5178 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5179 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5180 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5181}
5182
5183
5184/**
5185 * Raises a FPU stack overflow exception on a push with a memory operand.
5186 *
5187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5188 * @param iEffSeg The effective memory operand selector register.
5189 * @param GCPtrEff The effective memory operand offset.
5190 */
5191void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5192{
5193 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5194 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5195 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5196 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5197}
5198
5199/** @} */
5200
5201
5202/** @name SSE+AVX SIMD access and helpers.
5203 *
5204 * @{
5205 */
5206/**
5207 * Stores a result in a SIMD XMM register, updates the MXCSR.
5208 *
5209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5210 * @param pResult The result to store.
5211 * @param iXmmReg Which SIMD XMM register to store the result in.
5212 */
5213void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5214{
5215 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5216 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5217
5218 /* The result is only updated if there is no unmasked exception pending. */
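/* The MXCSR mask bits (IM..PM, bits 7-12) sit X86_MXCSR_XCPT_MASK_SHIFT (7) bits
   above the corresponding flag bits (IE..PE, bits 0-5), so shifting the masks down
   lines them up; e.g. an unmasked #I shows as flag bit 0 set with mask bit 7 clear. */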
5219 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5220 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5221 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5222}
5223
5224
5225/**
5226 * Updates the MXCSR.
5227 *
5228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5229 * @param fMxcsr The new MXCSR value.
5230 */
5231void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5232{
5233 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5234 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5235}
5236/** @} */
5237
5238
5239/** @name Memory access.
5240 *
5241 * @{
5242 */
5243
5244
5245/**
5246 * Updates the IEMCPU::cbWritten counter if applicable.
5247 *
5248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5249 * @param fAccess The access being accounted for.
5250 * @param cbMem The access size.
5251 */
5252DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5253{
5254 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5255 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5256 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5257}
5258
5259
5260/**
5261 * Applies the segment limit, base and attributes.
5262 *
5263 * This may raise a \#GP or \#SS.
5264 *
5265 * @returns VBox strict status code.
5266 *
5267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5268 * @param fAccess The kind of access which is being performed.
5269 * @param iSegReg The index of the segment register to apply.
5270 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5271 * TSS, ++).
5272 * @param cbMem The access size.
5273 * @param pGCPtrMem Pointer to the guest memory address to apply
5274 * segmentation to. Input and output parameter.
5275 */
5276VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5277{
5278 if (iSegReg == UINT8_MAX)
5279 return VINF_SUCCESS;
5280
5281 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5282 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5283 switch (pVCpu->iem.s.enmCpuMode)
5284 {
5285 case IEMMODE_16BIT:
5286 case IEMMODE_32BIT:
5287 {
5288 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5289 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5290
5291 if ( pSel->Attr.n.u1Present
5292 && !pSel->Attr.n.u1Unusable)
5293 {
5294 Assert(pSel->Attr.n.u1DescType);
5295 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5296 {
5297 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5298 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5299 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5300
5301 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5302 {
5303 /** @todo CPL check. */
5304 }
5305
5306 /*
5307 * There are two kinds of data selectors, normal and expand down.
5308 */
5309 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5310 {
5311 if ( GCPtrFirst32 > pSel->u32Limit
5312 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5313 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5314 }
5315 else
5316 {
5317 /*
5318 * The upper boundary is defined by the B bit, not the G bit!
5319 */
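/* Expand-down segments are only valid in the range (limit, upper]; e.g. with
   limit=0x0fff and B=1 the accessible offsets are 0x1000 thru 0xffffffff. */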
5320 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5321 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5322 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5323 }
5324 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5325 }
5326 else
5327 {
5328 /*
5329 * A code selector can normally be used for reading; writing is
5330 * only permitted in real and V8086 mode.
5331 */
5332 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5333 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5334 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5335 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5336 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5337
5338 if ( GCPtrFirst32 > pSel->u32Limit
5339 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5340 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5341
5342 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5343 {
5344 /** @todo CPL check. */
5345 }
5346
5347 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5348 }
5349 }
5350 else
5351 return iemRaiseGeneralProtectionFault0(pVCpu);
5352 return VINF_SUCCESS;
5353 }
5354
5355 case IEMMODE_64BIT:
5356 {
5357 RTGCPTR GCPtrMem = *pGCPtrMem;
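/* In 64-bit mode only FS and GS apply a non-zero base; the CS/DS/ES/SS bases are
   treated as zero and limits are not checked, leaving just the canonical test. */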
5358 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5359 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5360
5361 Assert(cbMem >= 1);
5362 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5363 return VINF_SUCCESS;
5364 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5365 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5366 return iemRaiseGeneralProtectionFault0(pVCpu);
5367 }
5368
5369 default:
5370 AssertFailedReturn(VERR_IEM_IPE_7);
5371 }
5372}
5373
5374
5375/**
5376 * Translates a virtual address to a physical address and checks if we
5377 * can access the page as specified.
5378 *
5379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5380 * @param GCPtrMem The virtual address.
5381 * @param fAccess The intended access.
5382 * @param pGCPhysMem Where to return the physical address.
5383 */
5384VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5385{
5386 /** @todo Need a different PGM interface here. We're currently using
5387 * generic / REM interfaces. This won't cut it for R0. */
5388 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5389 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5390 * here. */
5391 PGMPTWALK Walk;
5392 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5393 if (RT_FAILURE(rc))
5394 {
5395 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5396 /** @todo Check unassigned memory in unpaged mode. */
5397 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5398#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5399 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5400 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5401#endif
5402 *pGCPhysMem = NIL_RTGCPHYS;
5403 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5404 }
5405
5406 /* If the page is writable, user-accessible and does not have the no-exec
5407 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
5408 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5409 {
5410 /* Write to read only memory? */
5411 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5412 && !(Walk.fEffective & X86_PTE_RW)
5413 && ( ( pVCpu->iem.s.uCpl == 3
5414 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5415 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5416 {
5417 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5418 *pGCPhysMem = NIL_RTGCPHYS;
5419#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5420 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5421 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5422#endif
5423 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5424 }
5425
5426 /* Kernel memory accessed by userland? */
5427 if ( !(Walk.fEffective & X86_PTE_US)
5428 && pVCpu->iem.s.uCpl == 3
5429 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5430 {
5431 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5432 *pGCPhysMem = NIL_RTGCPHYS;
5433#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5434 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5435 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5436#endif
5437 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5438 }
5439
5440 /* Executing non-executable memory? */
5441 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5442 && (Walk.fEffective & X86_PTE_PAE_NX)
5443 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5444 {
5445 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5446 *pGCPhysMem = NIL_RTGCPHYS;
5447#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5448 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5449 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5450#endif
5451 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5452 VERR_ACCESS_DENIED);
5453 }
5454 }
5455
5456 /*
5457 * Set the dirty / access flags.
5458 * ASSUMES this is set when the address is translated rather than on commit...
5459 */
5460 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5461 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5462 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5463 {
5464 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5465 AssertRC(rc2);
5466 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5467 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5468 }
5469
5470 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5471 *pGCPhysMem = GCPhys;
5472 return VINF_SUCCESS;
5473}
5474
5475
5476/**
5477 * Looks up a memory mapping entry.
5478 *
5479 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5481 * @param pvMem The memory address.
5482 * @param fAccess The kind of access to match.
5483 */
5484DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5485{
5486 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5487 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5488 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5489 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5490 return 0;
5491 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5492 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5493 return 1;
5494 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5495 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5496 return 2;
5497 return VERR_NOT_FOUND;
5498}
5499
5500
5501/**
5502 * Finds a free memory mapping entry when iNextMapping doesn't point at one.
5503 *
5504 * @returns Memory mapping index, 1024 on failure.
5505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5506 */
5507static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5508{
5509 /*
5510 * The easy case.
5511 */
5512 if (pVCpu->iem.s.cActiveMappings == 0)
5513 {
5514 pVCpu->iem.s.iNextMapping = 1;
5515 return 0;
5516 }
5517
5518 /* There should be enough mappings for all instructions. */
5519 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5520
5521 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5522 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5523 return i;
5524
5525 AssertFailedReturn(1024);
5526}
5527
5528
5529/**
5530 * Commits a bounce buffer that needs writing back and unmaps it.
5531 *
5532 * @returns Strict VBox status code.
5533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5534 * @param iMemMap The index of the buffer to commit.
5535 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5536 * Always false in ring-3, obviously.
5537 */
5538static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5539{
5540 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5541 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5542#ifdef IN_RING3
5543 Assert(!fPostponeFail);
5544 RT_NOREF_PV(fPostponeFail);
5545#endif
5546
5547 /*
5548 * Do the writing.
5549 */
5550 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5551 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5552 {
5553 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5554 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5555 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5556 if (!pVCpu->iem.s.fBypassHandlers)
5557 {
5558 /*
5559 * Carefully and efficiently dealing with access handler return
5560 * codes makes this a little bloated.
5561 */
5562 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5563 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5564 pbBuf,
5565 cbFirst,
5566 PGMACCESSORIGIN_IEM);
5567 if (rcStrict == VINF_SUCCESS)
5568 {
5569 if (cbSecond)
5570 {
5571 rcStrict = PGMPhysWrite(pVM,
5572 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5573 pbBuf + cbFirst,
5574 cbSecond,
5575 PGMACCESSORIGIN_IEM);
5576 if (rcStrict == VINF_SUCCESS)
5577 { /* nothing */ }
5578 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5579 {
5580 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5581 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5582 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5583 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5584 }
5585#ifndef IN_RING3
5586 else if (fPostponeFail)
5587 {
5588 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5589 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5590 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5591 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5592 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5593 return iemSetPassUpStatus(pVCpu, rcStrict);
5594 }
5595#endif
5596 else
5597 {
5598 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5599 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5600 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5601 return rcStrict;
5602 }
5603 }
5604 }
5605 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5606 {
5607 if (!cbSecond)
5608 {
5609 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5610 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5611 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5612 }
5613 else
5614 {
5615 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5616 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5617 pbBuf + cbFirst,
5618 cbSecond,
5619 PGMACCESSORIGIN_IEM);
5620 if (rcStrict2 == VINF_SUCCESS)
5621 {
5622 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5623 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5624 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5625 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5626 }
5627 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5628 {
5629 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5630 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5631 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5632 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5633 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5634 }
5635#ifndef IN_RING3
5636 else if (fPostponeFail)
5637 {
5638 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5639 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5640 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5641 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5642 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5643 return iemSetPassUpStatus(pVCpu, rcStrict);
5644 }
5645#endif
5646 else
5647 {
5648 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5649 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5650 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5651 return rcStrict2;
5652 }
5653 }
5654 }
5655#ifndef IN_RING3
5656 else if (fPostponeFail)
5657 {
5658 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5659 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5660 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5661 if (!cbSecond)
5662 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5663 else
5664 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5665 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5666 return iemSetPassUpStatus(pVCpu, rcStrict);
5667 }
5668#endif
5669 else
5670 {
5671 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5672 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5673 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5674 return rcStrict;
5675 }
5676 }
5677 else
5678 {
5679 /*
5680 * No access handlers, much simpler.
5681 */
5682 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5683 if (RT_SUCCESS(rc))
5684 {
5685 if (cbSecond)
5686 {
5687 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5688 if (RT_SUCCESS(rc))
5689 { /* likely */ }
5690 else
5691 {
5692 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5693 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5694 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5695 return rc;
5696 }
5697 }
5698 }
5699 else
5700 {
5701 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5702 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5703 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5704 return rc;
5705 }
5706 }
5707 }
5708
5709#if defined(IEM_LOG_MEMORY_WRITES)
5710 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5711 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5712 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5713 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5714 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5715 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5716
5717 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5718 g_cbIemWrote = cbWrote;
5719 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5720#endif
5721
5722 /*
5723 * Free the mapping entry.
5724 */
5725 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5726 Assert(pVCpu->iem.s.cActiveMappings != 0);
5727 pVCpu->iem.s.cActiveMappings--;
5728 return VINF_SUCCESS;
5729}
5730
5731
5732/**
5733 * iemMemMap worker that deals with a request crossing pages.
5734 */
5735static VBOXSTRICTRC
5736iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5737{
5738 /*
5739 * Do the address translations.
5740 */
5741 RTGCPHYS GCPhysFirst;
5742 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5743 if (rcStrict != VINF_SUCCESS)
5744 return rcStrict;
5745
5746 RTGCPHYS GCPhysSecond;
5747 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5748 fAccess, &GCPhysSecond);
5749 if (rcStrict != VINF_SUCCESS)
5750 return rcStrict;
5751 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
5752
5753 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5754
5755 /*
5756 * Read in the current memory content if it's a read, execute or partial
5757 * write access.
5758 */
5759 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5760 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5761 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
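/* E.g. a 4 byte access at page offset 0xffe splits into cbFirstPage=2 on the
   first page and cbSecondPage=2 on the second. */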
5762
5763 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5764 {
5765 if (!pVCpu->iem.s.fBypassHandlers)
5766 {
5767 /*
5768 * Must carefully deal with access handler status codes here,
5769 * which makes the code a bit bloated.
5770 */
5771 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5772 if (rcStrict == VINF_SUCCESS)
5773 {
5774 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5775 if (rcStrict == VINF_SUCCESS)
5776 { /*likely */ }
5777 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5778 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5779 else
5780 {
5781 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5782 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5783 return rcStrict;
5784 }
5785 }
5786 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5787 {
5788 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5789 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5790 {
5791 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5792 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5793 }
5794 else
5795 {
5796 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5797 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
5798 return rcStrict2;
5799 }
5800 }
5801 else
5802 {
5803 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5804 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5805 return rcStrict;
5806 }
5807 }
5808 else
5809 {
5810 /*
5811 * No informational status codes here, much more straightforward.
5812 */
5813 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5814 if (RT_SUCCESS(rc))
5815 {
5816 Assert(rc == VINF_SUCCESS);
5817 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5818 if (RT_SUCCESS(rc))
5819 Assert(rc == VINF_SUCCESS);
5820 else
5821 {
5822 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5823 return rc;
5824 }
5825 }
5826 else
5827 {
5828 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5829 return rc;
5830 }
5831 }
5832 }
5833#ifdef VBOX_STRICT
5834 else
5835 memset(pbBuf, 0xcc, cbMem);
5836 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5837 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5838#endif
5839 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
5840
5841 /*
5842 * Commit the bounce buffer entry.
5843 */
5844 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5845 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5846 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5847 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5848 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5849 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5850 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5851 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5852 pVCpu->iem.s.cActiveMappings++;
5853
5854 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5855 *ppvMem = pbBuf;
5856 return VINF_SUCCESS;
5857}
5858
5859
5860/**
5861 * iemMemMap worker that deals with iemMemPageMap failures.
5862 */
5863static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5864 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5865{
5866 /*
5867 * Filter out conditions we can handle and the ones which shouldn't happen.
5868 */
5869 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5870 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5871 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5872 {
5873 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5874 return rcMap;
5875 }
5876 pVCpu->iem.s.cPotentialExits++;
5877
5878 /*
5879 * Read in the current memory content if it's a read, execute or partial
5880 * write access.
5881 */
5882 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5883 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5884 {
5885 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5886 memset(pbBuf, 0xff, cbMem);
5887 else
5888 {
5889 int rc;
5890 if (!pVCpu->iem.s.fBypassHandlers)
5891 {
5892 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5893 if (rcStrict == VINF_SUCCESS)
5894 { /* nothing */ }
5895 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5896 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5897 else
5898 {
5899 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5900 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5901 return rcStrict;
5902 }
5903 }
5904 else
5905 {
5906 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5907 if (RT_SUCCESS(rc))
5908 { /* likely */ }
5909 else
5910 {
5911 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5912 GCPhysFirst, rc));
5913 return rc;
5914 }
5915 }
5916 }
5917 }
5918#ifdef VBOX_STRICT
5919 else
5920 memset(pbBuf, 0xcc, cbMem);
5923 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5924 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5925#endif
5926
5927 /*
5928 * Commit the bounce buffer entry.
5929 */
5930 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5932 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5933 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5934 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5935 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5936 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5937 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5938 pVCpu->iem.s.cActiveMappings++;
5939
5940 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5941 *ppvMem = pbBuf;
5942 return VINF_SUCCESS;
5943}
5944
5945
5946
5947/**
5948 * Maps the specified guest memory for the given kind of access.
5949 *
5950 * This may be using bounce buffering of the memory if it's crossing a page
5951 * boundary or if there is an access handler installed for any of it. Because
5952 * of lock prefix guarantees, we're in for some extra clutter when this
5953 * happens.
5954 *
5955 * This may raise a \#GP, \#SS, \#PF or \#AC.
5956 *
5957 * @returns VBox strict status code.
5958 *
5959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5960 * @param ppvMem Where to return the pointer to the mapped memory.
5961 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
5962 * 8, 12, 16, 32 or 512. When used by string operations
5963 * it can be up to a page.
5964 * @param iSegReg The index of the segment register to use for this
5965 * access. The base and limits are checked. Use UINT8_MAX
5966 * to indicate that no segmentation is required (for IDT,
5967 * GDT and LDT accesses).
5968 * @param GCPtrMem The address of the guest memory.
5969 * @param fAccess How the memory is being accessed. The
5970 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
5971 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
5972 * when raising exceptions.
5973 * @param uAlignCtl Alignment control:
5974 * - Bits 15:0 is the alignment mask.
5975 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
5976 * IEM_MEMMAP_F_ALIGN_SSE, and
5977 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
5978 * Pass zero to skip alignment.
5979 */
5980VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5981 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
5982{
5983 /*
5984 * Check the input and figure out which mapping entry to use.
5985 */
5986 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
5987 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
5988 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
5989    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5990 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5991
5992 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5993 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5994 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5995 {
5996 iMemMap = iemMemMapFindFree(pVCpu);
5997 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5998 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5999 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6000 pVCpu->iem.s.aMemMappings[2].fAccess),
6001 VERR_IEM_IPE_9);
6002 }
6003
6004 /*
6005 * Map the memory, checking that we can actually access it. If something
6006 * slightly complicated happens, fall back on bounce buffering.
6007 */
6008 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6009 if (rcStrict == VINF_SUCCESS)
6010 { /* likely */ }
6011 else
6012 return rcStrict;
6013
6014 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6015 { /* likely */ }
6016 else
6017 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6018
6019 /*
6020 * Alignment check.
6021 */
6022 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6023 { /* likelyish */ }
6024 else
6025 {
6026 /* Misaligned access. */
6027 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6028 {
6029 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6030 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6031 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6032 {
6033 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6034
6035 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6036 return iemRaiseAlignmentCheckException(pVCpu);
6037 }
6038 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6039 && iemMemAreAlignmentChecksEnabled(pVCpu)
6040/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6041 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
6042 )
6043 return iemRaiseAlignmentCheckException(pVCpu);
6044 else
6045 return iemRaiseGeneralProtectionFault0(pVCpu);
6046 }
6047 }
6048
6049#ifdef IEM_WITH_DATA_TLB
6050 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6051
6052 /*
6053 * Get the TLB entry for this page.
6054 */
6055 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6056 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6057 if (pTlbe->uTag == uTag)
6058 {
6059# ifdef VBOX_WITH_STATISTICS
6060 pVCpu->iem.s.DataTlb.cTlbHits++;
6061# endif
6062 }
6063 else
6064 {
6065 pVCpu->iem.s.DataTlb.cTlbMisses++;
6066 PGMPTWALK Walk;
6067 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6068 if (RT_FAILURE(rc))
6069 {
6070 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6071# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6072 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6073 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6074# endif
6075 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6076 }
6077
6078 Assert(Walk.fSucceeded);
6079 pTlbe->uTag = uTag;
6080 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6081 pTlbe->GCPhys = Walk.GCPhys;
6082 pTlbe->pbMappingR3 = NULL;
6083 }
6084
6085 /*
6086 * Check TLB page table level access flags.
6087 */
6088 /* If the page is either supervisor only or non-writable, we need to do
6089 more careful access checks. */
6090 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6091 {
6092 /* Write to read only memory? */
6093 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6094 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6095 && ( ( pVCpu->iem.s.uCpl == 3
6096 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6097 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6098 {
6099 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6100# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6101 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6102 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6103# endif
6104 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6105 }
6106
6107 /* Kernel memory accessed by userland? */
6108 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6109 && pVCpu->iem.s.uCpl == 3
6110 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6111 {
6112 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6113# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6114 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6115 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6116# endif
6117 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6118 }
6119 }
6120
6121 /*
6122 * Set the dirty / access flags.
6123 * ASSUMES this is set when the address is translated rather than on commit...
6124 */
6125 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6126 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6127 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6128 {
6129 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6130 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6131 AssertRC(rc2);
6132 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6133 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6134 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6135 }
6136
6137 /*
6138 * Look up the physical page info if necessary.
6139 */
6140 uint8_t *pbMem = NULL;
6141 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6142# ifdef IN_RING3
6143 pbMem = pTlbe->pbMappingR3;
6144# else
6145 pbMem = NULL;
6146# endif
6147 else
6148 {
6149 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6150 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6151 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6152 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6153 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6154 { /* likely */ }
6155 else
6156 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6157 pTlbe->pbMappingR3 = NULL;
6158 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6159 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6160 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6161 &pbMem, &pTlbe->fFlagsAndPhysRev);
6162 AssertRCReturn(rc, rc);
6163# ifdef IN_RING3
6164 pTlbe->pbMappingR3 = pbMem;
6165# endif
6166 }
6167
6168 /*
6169 * Check the physical page level access and mapping.
6170 */
6171 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6172 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6173 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6174 { /* probably likely */ }
6175 else
6176 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6177 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6178 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6179 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6180 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6181 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6182
6183 if (pbMem)
6184 {
6185 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6186 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6187 fAccess |= IEM_ACCESS_NOT_LOCKED;
6188 }
6189 else
6190 {
6191 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6192 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6193 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6194 if (rcStrict != VINF_SUCCESS)
6195 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6196 }
6197
6198 void * const pvMem = pbMem;
6199
6200 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6201 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6202 if (fAccess & IEM_ACCESS_TYPE_READ)
6203 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6204
6205#else /* !IEM_WITH_DATA_TLB */
6206
6207 RTGCPHYS GCPhysFirst;
6208 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6209 if (rcStrict != VINF_SUCCESS)
6210 return rcStrict;
6211
6212 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6213 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6214 if (fAccess & IEM_ACCESS_TYPE_READ)
6215 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6216
6217 void *pvMem;
6218 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6219 if (rcStrict != VINF_SUCCESS)
6220 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6221
6222#endif /* !IEM_WITH_DATA_TLB */
6223
6224 /*
6225 * Fill in the mapping table entry.
6226 */
6227 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6228 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6229 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6230 pVCpu->iem.s.cActiveMappings += 1;
6231
6232 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6233 *ppvMem = pvMem;
6234
6235 return VINF_SUCCESS;
6236}
6237
6238
6239/**
6240 * Commits the guest memory if bounce buffered and unmaps it.
6241 *
6242 * @returns Strict VBox status code.
6243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6244 * @param pvMem The mapping.
6245 * @param fAccess The kind of access.
6246 */
6247VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6248{
6249 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6250 AssertReturn(iMemMap >= 0, iMemMap);
6251
6252 /* If it's bounce buffered, we may need to write back the buffer. */
6253 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6254 {
6255 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6256 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6257 }
6258 /* Otherwise unlock it. */
6259 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6260 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6261
6262 /* Free the entry. */
6263 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6264 Assert(pVCpu->iem.s.cActiveMappings != 0);
6265 pVCpu->iem.s.cActiveMappings--;
6266 return VINF_SUCCESS;
6267}
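
/*
 * Illustrative usage sketch (simplified, not lifted verbatim from any caller):
 * iemMemMap and iemMemCommitAndUnmap are meant to be used as a map / modify /
 * commit pair, just like the iemMemStoreDataUxx helpers further down do it.
 * The alignment control here is the natural alignment mask (sizeof - 1); for
 * 16 byte SSE operands it would typically be
 * 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE instead.  GCPtrEff and
 * u32Value are placeholder names for whatever the instruction implementation
 * has at hand; pVCpu is assumed to be in scope.
 *
 *      uint32_t    *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_DS, GCPtrEff,
 *                                        IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;
 */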
6268
6269#ifdef IEM_WITH_SETJMP
6270
6271/**
6272 * Maps the specified guest memory for the given kind of access, longjmp on
6273 * error.
6274 *
6275 * This may be using bounce buffering of the memory if it's crossing a page
6276 * boundary or if there is an access handler installed for any of it. Because
6277 * of lock prefix guarantees, we're in for some extra clutter when this
6278 * happens.
6279 *
6280 * This may raise a \#GP, \#SS, \#PF or \#AC.
6281 *
6282 * @returns Pointer to the mapped memory.
6283 *
6284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6285 * @param cbMem The number of bytes to map. This is usually 1,
6286 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6287 * string operations it can be up to a page.
6288 * @param iSegReg The index of the segment register to use for
6289 * this access. The base and limits are checked.
6290 * Use UINT8_MAX to indicate that no segmentation
6291 * is required (for IDT, GDT and LDT accesses).
6292 * @param GCPtrMem The address of the guest memory.
6293 * @param fAccess How the memory is being accessed. The
6294 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6295 * how to map the memory, while the
6296 * IEM_ACCESS_WHAT_XXX bit is used when raising
6297 * exceptions.
6298 * @param uAlignCtl Alignment control:
6299 * - Bits 15:0 is the alignment mask.
6300 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6301 * IEM_MEMMAP_F_ALIGN_SSE, and
6302 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6303 * Pass zero to skip alignment.
6304 */
6305void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6306 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6307{
6308 /*
6309 * Check the input, check segment access and adjust address
6310 * with segment base.
6311 */
6312 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6313    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6314 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6315
6316 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6317 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6318 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6319
6320 /*
6321 * Alignment check.
6322 */
6323 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6324 { /* likelyish */ }
6325 else
6326 {
6327 /* Misaligned access. */
6328 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6329 {
6330 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6331 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6332 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6333 {
6334 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6335
6336 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6337 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6338 }
6339 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6340 && iemMemAreAlignmentChecksEnabled(pVCpu)
6341/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6342 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
6343 )
6344 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6345 else
6346 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6347 }
6348 }
6349
6350 /*
6351 * Figure out which mapping entry to use.
6352 */
6353 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6354 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6355 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6356 {
6357 iMemMap = iemMemMapFindFree(pVCpu);
6358 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6359 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6360 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6361 pVCpu->iem.s.aMemMappings[2].fAccess),
6362 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6363 }
6364
6365 /*
6366 * Crossing a page boundary?
6367 */
6368 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6369 { /* No (likely). */ }
6370 else
6371 {
6372 void *pvMem;
6373 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6374 if (rcStrict == VINF_SUCCESS)
6375 return pvMem;
6376 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6377 }
6378
6379#ifdef IEM_WITH_DATA_TLB
6380 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6381
6382 /*
6383 * Get the TLB entry for this page.
6384 */
6385 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6386 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6387 if (pTlbe->uTag == uTag)
6388 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6389 else
6390 {
6391 pVCpu->iem.s.DataTlb.cTlbMisses++;
6392 PGMPTWALK Walk;
6393 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6394 if (RT_FAILURE(rc))
6395 {
6396 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6397# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6398 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6399 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6400# endif
6401 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6402 }
6403
6404 Assert(Walk.fSucceeded);
6405 pTlbe->uTag = uTag;
6406 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6407 pTlbe->GCPhys = Walk.GCPhys;
6408 pTlbe->pbMappingR3 = NULL;
6409 }
6410
6411 /*
6412 * Check the flags and physical revision.
6413 */
6414 /** @todo make the caller pass these in with fAccess. */
6415 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6416 ? IEMTLBE_F_PT_NO_USER : 0;
6417 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6418 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6419 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6420 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6421 ? IEMTLBE_F_PT_NO_WRITE : 0)
6422 : 0;
6423 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6424 uint8_t *pbMem = NULL;
6425 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6426 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6427# ifdef IN_RING3
6428 pbMem = pTlbe->pbMappingR3;
6429# else
6430 pbMem = NULL;
6431# endif
6432 else
6433 {
6434 /*
6435 * Okay, something isn't quite right or needs refreshing.
6436 */
6437 /* Write to read only memory? */
6438 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6439 {
6440 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6441# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6442 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6443 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6444# endif
6445 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6446 }
6447
6448 /* Kernel memory accessed by userland? */
6449 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6450 {
6451 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6452# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6453 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6454 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6455# endif
6456 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6457 }
6458
6459 /* Set the dirty / access flags.
6460 ASSUMES this is set when the address is translated rather than on commit... */
6461 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6462 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6463 {
6464 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6465 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6466 AssertRC(rc2);
6467 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6468 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6469 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6470 }
6471
6472 /*
6473 * Check if the physical page info needs updating.
6474 */
6475 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6476# ifdef IN_RING3
6477 pbMem = pTlbe->pbMappingR3;
6478# else
6479 pbMem = NULL;
6480# endif
6481 else
6482 {
6483 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6484 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6485 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6486 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6487 pTlbe->pbMappingR3 = NULL;
6488 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6489 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6490 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6491 &pbMem, &pTlbe->fFlagsAndPhysRev);
6492 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6493# ifdef IN_RING3
6494 pTlbe->pbMappingR3 = pbMem;
6495# endif
6496 }
6497
6498 /*
6499 * Check the physical page level access and mapping.
6500 */
6501 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6502 { /* probably likely */ }
6503 else
6504 {
6505 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6506 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6507 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6508 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6509 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6510 if (rcStrict == VINF_SUCCESS)
6511 return pbMem;
6512 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6513 }
6514 }
6515 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6516
6517 if (pbMem)
6518 {
6519 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6520 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6521 fAccess |= IEM_ACCESS_NOT_LOCKED;
6522 }
6523 else
6524 {
6525 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6526 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6527 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6528 if (rcStrict == VINF_SUCCESS)
6529 return pbMem;
6530 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6531 }
6532
6533 void * const pvMem = pbMem;
6534
6535 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6536 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6537 if (fAccess & IEM_ACCESS_TYPE_READ)
6538 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6539
6540#else /* !IEM_WITH_DATA_TLB */
6541
6542
6543 RTGCPHYS GCPhysFirst;
6544 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6545 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6546 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6547
6548 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6549 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6550 if (fAccess & IEM_ACCESS_TYPE_READ)
6551 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6552
6553 void *pvMem;
6554 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6555 if (rcStrict == VINF_SUCCESS)
6556 { /* likely */ }
6557 else
6558 {
6559 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6560 if (rcStrict == VINF_SUCCESS)
6561 return pvMem;
6562 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6563 }
6564
6565#endif /* !IEM_WITH_DATA_TLB */
6566
6567 /*
6568 * Fill in the mapping table entry.
6569 */
6570 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6571 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6572 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6573 pVCpu->iem.s.cActiveMappings++;
6574
6575 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6576 return pvMem;
6577}
6578
6579
6580/**
6581 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6582 *
6583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6584 * @param pvMem The mapping.
6585 * @param fAccess The kind of access.
6586 */
6587void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6588{
6589 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6590 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6591
6592 /* If it's bounce buffered, we may need to write back the buffer. */
6593 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6594 {
6595 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6596 {
6597 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6598 if (rcStrict == VINF_SUCCESS)
6599 return;
6600 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6601 }
6602 }
6603 /* Otherwise unlock it. */
6604 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6605 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6606
6607 /* Free the entry. */
6608 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6609 Assert(pVCpu->iem.s.cActiveMappings != 0);
6610 pVCpu->iem.s.cActiveMappings--;
6611}
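
/*
 * Illustrative sketch of the longjmp-style pairing (simplified): failures do
 * not come back as status codes here, iemMemMapJmp / iemMemCommitAndUnmapJmp
 * longjmp out instead, so the happy path is straight-line code.  This mirrors
 * what iemMemFetchDataU16Jmp and friends do below; GCPtrEff is a placeholder
 * for the caller's effective address.
 *
 *      uint16_t const *pu16Src  = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), X86_SREG_DS, GCPtrEff,
 *                                                                IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 *      uint16_t const  u16Value = *pu16Src;
 *      iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 */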
6612
6613#endif /* IEM_WITH_SETJMP */
6614
6615#ifndef IN_RING3
6616/**
6617 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6618 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
6619 *
6620 * Allows the instruction to be completed and retired, while the IEM user will
6621 * return to ring-3 immediately afterwards and do the postponed writes there.
6622 *
6623 * @returns VBox status code (no strict statuses). Caller must check
6624 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6626 * @param pvMem The mapping.
6627 * @param fAccess The kind of access.
6628 */
6629VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6630{
6631 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6632 AssertReturn(iMemMap >= 0, iMemMap);
6633
6634 /* If it's bounce buffered, we may need to write back the buffer. */
6635 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6636 {
6637 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6638 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6639 }
6640 /* Otherwise unlock it. */
6641 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6642 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6643
6644 /* Free the entry. */
6645 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6646 Assert(pVCpu->iem.s.cActiveMappings != 0);
6647 pVCpu->iem.s.cActiveMappings--;
6648 return VINF_SUCCESS;
6649}
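
/*
 * Hedged usage sketch (simplified): a REP-style implementation using the
 * postponing variant above must stop iterating once a postponed commit has
 * raised VMCPU_FF_IEM; the exact loop shape below is illustrative only and
 * pvDst is a placeholder for the mapped destination.
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvDst, IEM_ACCESS_DATA_W);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          return VINF_SUCCESS; // let ring-3 flush the postponed write before the next iteration
 *      // ... otherwise advance rSI/rDI/rCX and do the next iteration ...
 */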
6650#endif
6651
6652
6653/**
6654 * Rolls back mappings, releasing page locks and such.
6655 *
6656 * The caller shall only call this after checking cActiveMappings.
6657 *
6658 * @returns Strict VBox status code to pass up.
6659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6660 */
6661void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6662{
6663 Assert(pVCpu->iem.s.cActiveMappings > 0);
6664
6665 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6666 while (iMemMap-- > 0)
6667 {
6668 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6669 if (fAccess != IEM_ACCESS_INVALID)
6670 {
6671 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6672 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6673 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6674 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6675 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6676 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6677 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6678 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6679 pVCpu->iem.s.cActiveMappings--;
6680 }
6681 }
6682}
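
/*
 * Illustrative caller-side sketch (simplified): per the note above, the
 * rollback is only invoked after checking the active mapping count, typically
 * on a failure path where rcStrict is whatever the emulation returned.
 *
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */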
6683
6684
6685/**
6686 * Fetches a data byte.
6687 *
6688 * @returns Strict VBox status code.
6689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6690 * @param pu8Dst Where to return the byte.
6691 * @param iSegReg The index of the segment register to use for
6692 * this access. The base and limits are checked.
6693 * @param GCPtrMem The address of the guest memory.
6694 */
6695VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6696{
6697 /* The lazy approach for now... */
6698 uint8_t const *pu8Src;
6699 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6700 if (rc == VINF_SUCCESS)
6701 {
6702 *pu8Dst = *pu8Src;
6703 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6704 }
6705 return rc;
6706}
6707
6708
6709#ifdef IEM_WITH_SETJMP
6710/**
6711 * Fetches a data byte, longjmp on error.
6712 *
6713 * @returns The byte.
6714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6715 * @param iSegReg The index of the segment register to use for
6716 * this access. The base and limits are checked.
6717 * @param GCPtrMem The address of the guest memory.
6718 */
6719uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6720{
6721 /* The lazy approach for now... */
6722 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6723 uint8_t const bRet = *pu8Src;
6724 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6725 return bRet;
6726}
6727#endif /* IEM_WITH_SETJMP */
6728
6729
6730/**
6731 * Fetches a data word.
6732 *
6733 * @returns Strict VBox status code.
6734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6735 * @param pu16Dst Where to return the word.
6736 * @param iSegReg The index of the segment register to use for
6737 * this access. The base and limits are checked.
6738 * @param GCPtrMem The address of the guest memory.
6739 */
6740VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6741{
6742 /* The lazy approach for now... */
6743 uint16_t const *pu16Src;
6744 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6745 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6746 if (rc == VINF_SUCCESS)
6747 {
6748 *pu16Dst = *pu16Src;
6749 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6750 }
6751 return rc;
6752}
6753
6754
6755#ifdef IEM_WITH_SETJMP
6756/**
6757 * Fetches a data word, longjmp on error.
6758 *
6759 * @returns The word.
6760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6761 * @param iSegReg The index of the segment register to use for
6762 * this access. The base and limits are checked.
6763 * @param GCPtrMem The address of the guest memory.
6764 */
6765uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6766{
6767 /* The lazy approach for now... */
6768 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6769 sizeof(*pu16Src) - 1);
6770 uint16_t const u16Ret = *pu16Src;
6771 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6772 return u16Ret;
6773}
6774#endif
6775
6776
6777/**
6778 * Fetches a data dword.
6779 *
6780 * @returns Strict VBox status code.
6781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6782 * @param pu32Dst Where to return the dword.
6783 * @param iSegReg The index of the segment register to use for
6784 * this access. The base and limits are checked.
6785 * @param GCPtrMem The address of the guest memory.
6786 */
6787VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6788{
6789 /* The lazy approach for now... */
6790 uint32_t const *pu32Src;
6791 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6792 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6793 if (rc == VINF_SUCCESS)
6794 {
6795 *pu32Dst = *pu32Src;
6796 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6797 }
6798 return rc;
6799}
6800
6801
6802/**
6803 * Fetches a data dword and zero extends it to a qword.
6804 *
6805 * @returns Strict VBox status code.
6806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6807 * @param pu64Dst Where to return the qword.
6808 * @param iSegReg The index of the segment register to use for
6809 * this access. The base and limits are checked.
6810 * @param GCPtrMem The address of the guest memory.
6811 */
6812VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6813{
6814 /* The lazy approach for now... */
6815 uint32_t const *pu32Src;
6816 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6817 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6818 if (rc == VINF_SUCCESS)
6819 {
6820 *pu64Dst = *pu32Src;
6821 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6822 }
6823 return rc;
6824}
6825
6826
6827#ifdef IEM_WITH_SETJMP
6828
6829/**
6830 * Fetches a data dword, longjmp on error, fallback/safe version.
6831 *
6832 * @returns The dword.
6833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6834 * @param iSegReg The index of the segment register to use for
6835 * this access. The base and limits are checked.
6836 * @param GCPtrMem The address of the guest memory.
6837 */
6838uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6839{
6840 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6841 sizeof(*pu32Src) - 1);
6842 uint32_t const u32Ret = *pu32Src;
6843 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6844 return u32Ret;
6845}
6846
6847
6848/**
6849 * Fetches a data dword, longjmp on error.
6850 *
6851 * @returns The dword.
6852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6853 * @param iSegReg The index of the segment register to use for
6854 * this access. The base and limits are checked.
6855 * @param GCPtrMem The address of the guest memory.
6856 */
6857uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6858{
6859# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6860 /*
6861     * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6862 */
6863 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6864 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6865 {
6866 /*
6867 * TLB lookup.
6868 */
6869 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6870 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6871 if (pTlbe->uTag == uTag)
6872 {
6873 /*
6874 * Check TLB page table level access flags.
6875 */
6876 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6877 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6878 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6879 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6880 {
6881 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6882
6883 /*
6884 * Alignment check:
6885 */
6886 /** @todo check priority \#AC vs \#PF */
6887 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
6888 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6889 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
6890 || pVCpu->iem.s.uCpl != 3)
6891 {
6892 /*
6893 * Fetch and return the dword
6894 */
6895 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
6896 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
6897 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
6898 }
6899 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
6900 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6901 }
6902 }
6903 }
6904
6905    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
6906 outdated page pointer, or other troubles. */
6907 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
6908 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6909
6910# else
6911 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
6912 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6913 uint32_t const u32Ret = *pu32Src;
6914 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6915 return u32Ret;
6916# endif
6917}
6918#endif
6919
6920
6921#ifdef SOME_UNUSED_FUNCTION
6922/**
6923 * Fetches a data dword and sign extends it to a qword.
6924 *
6925 * @returns Strict VBox status code.
6926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6927 * @param pu64Dst Where to return the sign extended value.
6928 * @param iSegReg The index of the segment register to use for
6929 * this access. The base and limits are checked.
6930 * @param GCPtrMem The address of the guest memory.
6931 */
6932VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6933{
6934 /* The lazy approach for now... */
6935 int32_t const *pi32Src;
6936 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
6937 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
6938 if (rc == VINF_SUCCESS)
6939 {
6940 *pu64Dst = *pi32Src;
6941 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6942 }
6943#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6944 else
6945 *pu64Dst = 0;
6946#endif
6947 return rc;
6948}
6949#endif
6950
6951
6952/**
6953 * Fetches a data qword.
6954 *
6955 * @returns Strict VBox status code.
6956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6957 * @param pu64Dst Where to return the qword.
6958 * @param iSegReg The index of the segment register to use for
6959 * this access. The base and limits are checked.
6960 * @param GCPtrMem The address of the guest memory.
6961 */
6962VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6963{
6964 /* The lazy approach for now... */
6965 uint64_t const *pu64Src;
6966 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6967 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6968 if (rc == VINF_SUCCESS)
6969 {
6970 *pu64Dst = *pu64Src;
6971 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6972 }
6973 return rc;
6974}
6975
6976
6977#ifdef IEM_WITH_SETJMP
6978/**
6979 * Fetches a data qword, longjmp on error.
6980 *
6981 * @returns The qword.
6982 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6983 * @param iSegReg The index of the segment register to use for
6984 * this access. The base and limits are checked.
6985 * @param GCPtrMem The address of the guest memory.
6986 */
6987uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6988{
6989 /* The lazy approach for now... */
6990 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
6991 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6992 uint64_t const u64Ret = *pu64Src;
6993 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6994 return u64Ret;
6995}
6996#endif
6997
6998
6999/**
7000 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7001 *
7002 * @returns Strict VBox status code.
7003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7004 * @param pu64Dst Where to return the qword.
7005 * @param iSegReg The index of the segment register to use for
7006 * this access. The base and limits are checked.
7007 * @param GCPtrMem The address of the guest memory.
7008 */
7009VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7010{
7011 /* The lazy approach for now... */
7012 uint64_t const *pu64Src;
7013 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7014 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7015 if (rc == VINF_SUCCESS)
7016 {
7017 *pu64Dst = *pu64Src;
7018 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7019 }
7020 return rc;
7021}
7022
7023
7024#ifdef IEM_WITH_SETJMP
7025/**
7026 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7027 *
7028 * @returns The qword.
7029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7030 * @param iSegReg The index of the segment register to use for
7031 * this access. The base and limits are checked.
7032 * @param GCPtrMem The address of the guest memory.
7033 */
7034uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7035{
7036 /* The lazy approach for now... */
7037 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7038 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7039 uint64_t const u64Ret = *pu64Src;
7040 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7041 return u64Ret;
7042}
7043#endif
7044
7045
7046/**
7047 * Fetches a data tword.
7048 *
7049 * @returns Strict VBox status code.
7050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7051 * @param pr80Dst Where to return the tword.
7052 * @param iSegReg The index of the segment register to use for
7053 * this access. The base and limits are checked.
7054 * @param GCPtrMem The address of the guest memory.
7055 */
7056VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7057{
7058 /* The lazy approach for now... */
7059 PCRTFLOAT80U pr80Src;
7060 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7061 if (rc == VINF_SUCCESS)
7062 {
7063 *pr80Dst = *pr80Src;
7064 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7065 }
7066 return rc;
7067}
7068
7069
7070#ifdef IEM_WITH_SETJMP
7071/**
7072 * Fetches a data tword, longjmp on error.
7073 *
7074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7075 * @param pr80Dst Where to return the tword.
7076 * @param iSegReg The index of the segment register to use for
7077 * this access. The base and limits are checked.
7078 * @param GCPtrMem The address of the guest memory.
7079 */
7080void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7081{
7082 /* The lazy approach for now... */
7083 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7084 *pr80Dst = *pr80Src;
7085 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7086}
7087#endif
7088
7089
7090/**
7091 * Fetches a data decimal tword.
7092 *
7093 * @returns Strict VBox status code.
7094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7095 * @param pd80Dst Where to return the tword.
7096 * @param iSegReg The index of the segment register to use for
7097 * this access. The base and limits are checked.
7098 * @param GCPtrMem The address of the guest memory.
7099 */
7100VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7101{
7102 /* The lazy approach for now... */
7103 PCRTPBCD80U pd80Src;
7104 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7105 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7106 if (rc == VINF_SUCCESS)
7107 {
7108 *pd80Dst = *pd80Src;
7109 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7110 }
7111 return rc;
7112}
7113
7114
7115#ifdef IEM_WITH_SETJMP
7116/**
7117 * Fetches a data decimal tword, longjmp on error.
7118 *
7119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7120 * @param pd80Dst Where to return the tword.
7121 * @param iSegReg The index of the segment register to use for
7122 * this access. The base and limits are checked.
7123 * @param GCPtrMem The address of the guest memory.
7124 */
7125void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7126{
7127 /* The lazy approach for now... */
7128 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7129 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7130 *pd80Dst = *pd80Src;
7131 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7132}
7133#endif
7134
7135
7136/**
7137 * Fetches a data dqword (double qword), generally SSE related.
7138 *
7139 * @returns Strict VBox status code.
7140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7141 * @param   pu128Dst            Where to return the dqword.
7142 * @param iSegReg The index of the segment register to use for
7143 * this access. The base and limits are checked.
7144 * @param GCPtrMem The address of the guest memory.
7145 */
7146VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7147{
7148 /* The lazy approach for now... */
7149 PCRTUINT128U pu128Src;
7150 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7151 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7152 if (rc == VINF_SUCCESS)
7153 {
7154 pu128Dst->au64[0] = pu128Src->au64[0];
7155 pu128Dst->au64[1] = pu128Src->au64[1];
7156 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7157 }
7158 return rc;
7159}
7160
7161
7162#ifdef IEM_WITH_SETJMP
7163/**
7164 * Fetches a data dqword (double qword), generally SSE related.
7165 *
7166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7167 * @param   pu128Dst            Where to return the dqword.
7168 * @param iSegReg The index of the segment register to use for
7169 * this access. The base and limits are checked.
7170 * @param GCPtrMem The address of the guest memory.
7171 */
7172void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7173{
7174 /* The lazy approach for now... */
7175 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7176 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7177 pu128Dst->au64[0] = pu128Src->au64[0];
7178 pu128Dst->au64[1] = pu128Src->au64[1];
7179 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7180}
7181#endif
7182
7183
7184/**
7185 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7186 * related.
7187 *
7188 * Raises \#GP(0) if not aligned.
7189 *
7190 * @returns Strict VBox status code.
7191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7192 * @param   pu128Dst            Where to return the dqword.
7193 * @param iSegReg The index of the segment register to use for
7194 * this access. The base and limits are checked.
7195 * @param GCPtrMem The address of the guest memory.
7196 */
7197VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7198{
7199 /* The lazy approach for now... */
7200 PCRTUINT128U pu128Src;
7201 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7202 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7203 if (rc == VINF_SUCCESS)
7204 {
7205 pu128Dst->au64[0] = pu128Src->au64[0];
7206 pu128Dst->au64[1] = pu128Src->au64[1];
7207 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7208 }
7209 return rc;
7210}
7211
7212
7213#ifdef IEM_WITH_SETJMP
7214/**
7215 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7216 * related, longjmp on error.
7217 *
7218 * Raises \#GP(0) if not aligned.
7219 *
7220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7221 * @param   pu128Dst            Where to return the dqword.
7222 * @param iSegReg The index of the segment register to use for
7223 * this access. The base and limits are checked.
7224 * @param GCPtrMem The address of the guest memory.
7225 */
7226void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7227 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7228{
7229 /* The lazy approach for now... */
7230 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7231 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7232 pu128Dst->au64[0] = pu128Src->au64[0];
7233 pu128Dst->au64[1] = pu128Src->au64[1];
7234 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7235}
7236#endif
7237
7238
7239/**
7240 * Fetches a data oword (octo word), generally AVX related.
7241 *
7242 * @returns Strict VBox status code.
7243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7244 * @param   pu256Dst            Where to return the oword.
7245 * @param iSegReg The index of the segment register to use for
7246 * this access. The base and limits are checked.
7247 * @param GCPtrMem The address of the guest memory.
7248 */
7249VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7250{
7251 /* The lazy approach for now... */
7252 PCRTUINT256U pu256Src;
7253 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7254 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7255 if (rc == VINF_SUCCESS)
7256 {
7257 pu256Dst->au64[0] = pu256Src->au64[0];
7258 pu256Dst->au64[1] = pu256Src->au64[1];
7259 pu256Dst->au64[2] = pu256Src->au64[2];
7260 pu256Dst->au64[3] = pu256Src->au64[3];
7261 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7262 }
7263 return rc;
7264}
7265
7266
7267#ifdef IEM_WITH_SETJMP
7268/**
7269 * Fetches a data oword (octo word), generally AVX related.
7270 *
7271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7272 * @param   pu256Dst            Where to return the oword.
7273 * @param iSegReg The index of the segment register to use for
7274 * this access. The base and limits are checked.
7275 * @param GCPtrMem The address of the guest memory.
7276 */
7277void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7278{
7279 /* The lazy approach for now... */
7280 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7281 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7282 pu256Dst->au64[0] = pu256Src->au64[0];
7283 pu256Dst->au64[1] = pu256Src->au64[1];
7284 pu256Dst->au64[2] = pu256Src->au64[2];
7285 pu256Dst->au64[3] = pu256Src->au64[3];
7286 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7287}
7288#endif
7289
7290
7291/**
7292 * Fetches a data oword (octo word) at an aligned address, generally AVX
7293 * related.
7294 *
7295 * Raises \#GP(0) if not aligned.
7296 *
7297 * @returns Strict VBox status code.
7298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7299 * @param   pu256Dst            Where to return the oword.
7300 * @param iSegReg The index of the segment register to use for
7301 * this access. The base and limits are checked.
7302 * @param GCPtrMem The address of the guest memory.
7303 */
7304VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7305{
7306 /* The lazy approach for now... */
7307 PCRTUINT256U pu256Src;
7308 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7309 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7310 if (rc == VINF_SUCCESS)
7311 {
7312 pu256Dst->au64[0] = pu256Src->au64[0];
7313 pu256Dst->au64[1] = pu256Src->au64[1];
7314 pu256Dst->au64[2] = pu256Src->au64[2];
7315 pu256Dst->au64[3] = pu256Src->au64[3];
7316 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7317 }
7318 return rc;
7319}
7320
7321
7322#ifdef IEM_WITH_SETJMP
7323/**
7324 * Fetches a data oword (octo word) at an aligned address, generally AVX
7325 * related, longjmp on error.
7326 *
7327 * Raises \#GP(0) if not aligned.
7328 *
7329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7330 * @param   pu256Dst            Where to return the oword.
7331 * @param iSegReg The index of the segment register to use for
7332 * this access. The base and limits are checked.
7333 * @param GCPtrMem The address of the guest memory.
7334 */
7335void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7336 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7337{
7338 /* The lazy approach for now... */
7339 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7340 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7341 pu256Dst->au64[0] = pu256Src->au64[0];
7342 pu256Dst->au64[1] = pu256Src->au64[1];
7343 pu256Dst->au64[2] = pu256Src->au64[2];
7344 pu256Dst->au64[3] = pu256Src->au64[3];
7345 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7346}
7347#endif
7348
7349
7350
7351/**
7352 * Fetches a descriptor register (lgdt, lidt).
7353 *
7354 * @returns Strict VBox status code.
7355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7356 * @param pcbLimit Where to return the limit.
7357 * @param pGCPtrBase Where to return the base.
7358 * @param iSegReg The index of the segment register to use for
7359 * this access. The base and limits are checked.
7360 * @param GCPtrMem The address of the guest memory.
7361 * @param enmOpSize The effective operand size.
7362 */
7363VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7364 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7365{
7366 /*
7367 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7368 * little special:
7369 * - The two reads are done separately.
7370 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
7371 * - We suspect the 386 to actually commit the limit before the base in
7372 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7373 *   don't try to emulate this eccentric behavior, because it's not well
7374 * enough understood and rather hard to trigger.
7375 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7376 */
7377 VBOXSTRICTRC rcStrict;
7378 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7379 {
7380 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7381 if (rcStrict == VINF_SUCCESS)
7382 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7383 }
7384 else
7385 {
7386 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
7387 if (enmOpSize == IEMMODE_32BIT)
7388 {
7389 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7390 {
7391 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7392 if (rcStrict == VINF_SUCCESS)
7393 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7394 }
7395 else
7396 {
7397 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7398 if (rcStrict == VINF_SUCCESS)
7399 {
7400 *pcbLimit = (uint16_t)uTmp;
7401 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7402 }
7403 }
7404 if (rcStrict == VINF_SUCCESS)
7405 *pGCPtrBase = uTmp;
7406 }
7407 else
7408 {
7409 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7410 if (rcStrict == VINF_SUCCESS)
7411 {
7412 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7413 if (rcStrict == VINF_SUCCESS)
7414 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7415 }
7416 }
7417 }
7418 return rcStrict;
7419}
7420
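/*
 * Illustrative sketch, not used by IEM itself: decodes an LGDT/LIDT memory
 * operand from a plain byte buffer the way the function above describes it,
 * i.e. a 16-bit limit followed by a base whose width depends on the mode and
 * which is truncated to 24 bits for a 16-bit operand size outside 64-bit
 * mode.  The struct and function names are made up for the example.
 */
typedef struct IEMSKETCHXDTR
{
    uint16_t cbLimit;
    uint64_t GCPtrBase;
} IEMSKETCHXDTR;

static IEMSKETCHXDTR iemSketchDecodeXdtrOperand(uint8_t const *pbMem, bool f64Bit, bool fOpSize32)
{
    IEMSKETCHXDTR Result;
    Result.cbLimit = (uint16_t)(pbMem[0] | ((uint16_t)pbMem[1] << 8));
    if (f64Bit)
    {
        /* 64-bit mode: 8-byte base, the operand size override is ignored. */
        Result.GCPtrBase = 0;
        for (unsigned i = 0; i < 8; i++)
            Result.GCPtrBase |= (uint64_t)pbMem[2 + i] << (i * 8);
    }
    else
    {
        uint32_t const uBase = (uint32_t)(  pbMem[2]
                                          | ((uint32_t)pbMem[3] <<  8)
                                          | ((uint32_t)pbMem[4] << 16)
                                          | ((uint32_t)pbMem[5] << 24));
        /* 16-bit operand size keeps only the low 24 base bits. */
        Result.GCPtrBase = fOpSize32 ? uBase : (uBase & UINT32_C(0x00ffffff));
    }
    return Result;
}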
7421
7422
7423/**
7424 * Stores a data byte.
7425 *
7426 * @returns Strict VBox status code.
7427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7428 * @param iSegReg The index of the segment register to use for
7429 * this access. The base and limits are checked.
7430 * @param GCPtrMem The address of the guest memory.
7431 * @param u8Value The value to store.
7432 */
7433VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7434{
7435 /* The lazy approach for now... */
7436 uint8_t *pu8Dst;
7437 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7438 if (rc == VINF_SUCCESS)
7439 {
7440 *pu8Dst = u8Value;
7441 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7442 }
7443 return rc;
7444}
7445
7446
7447#ifdef IEM_WITH_SETJMP
7448/**
7449 * Stores a data byte, longjmp on error.
7450 *
7451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7452 * @param iSegReg The index of the segment register to use for
7453 * this access. The base and limits are checked.
7454 * @param GCPtrMem The address of the guest memory.
7455 * @param u8Value The value to store.
7456 */
7457void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7458{
7459 /* The lazy approach for now... */
7460 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7461 *pu8Dst = u8Value;
7462 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7463}
7464#endif
7465
7466
7467/**
7468 * Stores a data word.
7469 *
7470 * @returns Strict VBox status code.
7471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7472 * @param iSegReg The index of the segment register to use for
7473 * this access. The base and limits are checked.
7474 * @param GCPtrMem The address of the guest memory.
7475 * @param u16Value The value to store.
7476 */
7477VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7478{
7479 /* The lazy approach for now... */
7480 uint16_t *pu16Dst;
7481 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7482 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7483 if (rc == VINF_SUCCESS)
7484 {
7485 *pu16Dst = u16Value;
7486 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7487 }
7488 return rc;
7489}
7490
7491
7492#ifdef IEM_WITH_SETJMP
7493/**
7494 * Stores a data word, longjmp on error.
7495 *
7496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7497 * @param iSegReg The index of the segment register to use for
7498 * this access. The base and limits are checked.
7499 * @param GCPtrMem The address of the guest memory.
7500 * @param u16Value The value to store.
7501 */
7502void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7503{
7504 /* The lazy approach for now... */
7505 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7506 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7507 *pu16Dst = u16Value;
7508 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7509}
7510#endif
7511
7512
7513/**
7514 * Stores a data dword.
7515 *
7516 * @returns Strict VBox status code.
7517 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7518 * @param iSegReg The index of the segment register to use for
7519 * this access. The base and limits are checked.
7520 * @param GCPtrMem The address of the guest memory.
7521 * @param u32Value The value to store.
7522 */
7523VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7524{
7525 /* The lazy approach for now... */
7526 uint32_t *pu32Dst;
7527 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7528 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7529 if (rc == VINF_SUCCESS)
7530 {
7531 *pu32Dst = u32Value;
7532 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7533 }
7534 return rc;
7535}
7536
7537
7538#ifdef IEM_WITH_SETJMP
7539/**
7540 * Stores a data dword, longjmp on error.
7541 *
7543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7544 * @param iSegReg The index of the segment register to use for
7545 * this access. The base and limits are checked.
7546 * @param GCPtrMem The address of the guest memory.
7547 * @param u32Value The value to store.
7548 */
7549void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7550{
7551 /* The lazy approach for now... */
7552 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7553 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7554 *pu32Dst = u32Value;
7555 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7556}
7557#endif
7558
7559
7560/**
7561 * Stores a data qword.
7562 *
7563 * @returns Strict VBox status code.
7564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7565 * @param iSegReg The index of the segment register to use for
7566 * this access. The base and limits are checked.
7567 * @param GCPtrMem The address of the guest memory.
7568 * @param u64Value The value to store.
7569 */
7570VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7571{
7572 /* The lazy approach for now... */
7573 uint64_t *pu64Dst;
7574 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7575 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7576 if (rc == VINF_SUCCESS)
7577 {
7578 *pu64Dst = u64Value;
7579 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7580 }
7581 return rc;
7582}
7583
7584
7585#ifdef IEM_WITH_SETJMP
7586/**
7587 * Stores a data qword, longjmp on error.
7588 *
7589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7590 * @param iSegReg The index of the segment register to use for
7591 * this access. The base and limits are checked.
7592 * @param GCPtrMem The address of the guest memory.
7593 * @param u64Value The value to store.
7594 */
7595void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7596{
7597 /* The lazy approach for now... */
7598 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7599 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7600 *pu64Dst = u64Value;
7601 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7602}
7603#endif
7604
7605
7606/**
7607 * Stores a data dqword.
7608 *
7609 * @returns Strict VBox status code.
7610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7611 * @param iSegReg The index of the segment register to use for
7612 * this access. The base and limits are checked.
7613 * @param GCPtrMem The address of the guest memory.
7614 * @param u128Value The value to store.
7615 */
7616VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7617{
7618 /* The lazy approach for now... */
7619 PRTUINT128U pu128Dst;
7620 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7621 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7622 if (rc == VINF_SUCCESS)
7623 {
7624 pu128Dst->au64[0] = u128Value.au64[0];
7625 pu128Dst->au64[1] = u128Value.au64[1];
7626 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7627 }
7628 return rc;
7629}
7630
7631
7632#ifdef IEM_WITH_SETJMP
7633/**
7634 * Stores a data dqword, longjmp on error.
7635 *
7636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7637 * @param iSegReg The index of the segment register to use for
7638 * this access. The base and limits are checked.
7639 * @param GCPtrMem The address of the guest memory.
7640 * @param u128Value The value to store.
7641 */
7642void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7643{
7644 /* The lazy approach for now... */
7645 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7646 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7647 pu128Dst->au64[0] = u128Value.au64[0];
7648 pu128Dst->au64[1] = u128Value.au64[1];
7649 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7650}
7651#endif
7652
7653
7654/**
7655 * Stores a data dqword, SSE aligned.
7656 *
7657 * @returns Strict VBox status code.
7658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7659 * @param iSegReg The index of the segment register to use for
7660 * this access. The base and limits are checked.
7661 * @param GCPtrMem The address of the guest memory.
7662 * @param u128Value The value to store.
7663 */
7664VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7665{
7666 /* The lazy approach for now... */
7667 PRTUINT128U pu128Dst;
7668 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7669 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7670 if (rc == VINF_SUCCESS)
7671 {
7672 pu128Dst->au64[0] = u128Value.au64[0];
7673 pu128Dst->au64[1] = u128Value.au64[1];
7674 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7675 }
7676 return rc;
7677}
7678
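/*
 * Illustrative sketch, not used by IEM itself: how an alignment argument of
 * the form "(sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE"
 * is meant to be read - the low bits form the misalignment test mask, while
 * the flag bits select how a failed test is reported.  Only the mask test is
 * modelled here; the constant and helper name are made up for the example.
 */
#define IEMSKETCH_ALIGN_MASK    UINT32_C(0x000000ff)  /* hypothetical: low byte = alignment mask */

static bool iemSketchIsMisaligned(uint64_t GCPtrMem, uint32_t fAlignMaskAndFlags)
{
    uint32_t const fAlignMask = fAlignMaskAndFlags & IEMSKETCH_ALIGN_MASK;
    /* e.g. a 16-byte access uses mask 15: any set low bit means misaligned. */
    return (GCPtrMem & fAlignMask) != 0;
}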
7679
7680#ifdef IEM_WITH_SETJMP
7681/**
7682 * Stores a data dqword, SSE aligned, longjmp on error.
7683 *
7685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7686 * @param iSegReg The index of the segment register to use for
7687 * this access. The base and limits are checked.
7688 * @param GCPtrMem The address of the guest memory.
7689 * @param u128Value The value to store.
7690 */
7691void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7692 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7693{
7694 /* The lazy approach for now... */
7695 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7696 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7697 pu128Dst->au64[0] = u128Value.au64[0];
7698 pu128Dst->au64[1] = u128Value.au64[1];
7699 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7700}
7701#endif
7702
7703
7704/**
7705 * Stores a data oword (octo word, 256 bits).
7706 *
7707 * @returns Strict VBox status code.
7708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7709 * @param iSegReg The index of the segment register to use for
7710 * this access. The base and limits are checked.
7711 * @param GCPtrMem The address of the guest memory.
7712 * @param pu256Value Pointer to the value to store.
7713 */
7714VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7715{
7716 /* The lazy approach for now... */
7717 PRTUINT256U pu256Dst;
7718 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7719 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7720 if (rc == VINF_SUCCESS)
7721 {
7722 pu256Dst->au64[0] = pu256Value->au64[0];
7723 pu256Dst->au64[1] = pu256Value->au64[1];
7724 pu256Dst->au64[2] = pu256Value->au64[2];
7725 pu256Dst->au64[3] = pu256Value->au64[3];
7726 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7727 }
7728 return rc;
7729}
7730
7731
7732#ifdef IEM_WITH_SETJMP
7733/**
7734 * Stores a data oword (octo word, 256 bits), longjmp on error.
7735 *
7736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7737 * @param iSegReg The index of the segment register to use for
7738 * this access. The base and limits are checked.
7739 * @param GCPtrMem The address of the guest memory.
7740 * @param pu256Value Pointer to the value to store.
7741 */
7742void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7743{
7744 /* The lazy approach for now... */
7745 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7746 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7747 pu256Dst->au64[0] = pu256Value->au64[0];
7748 pu256Dst->au64[1] = pu256Value->au64[1];
7749 pu256Dst->au64[2] = pu256Value->au64[2];
7750 pu256Dst->au64[3] = pu256Value->au64[3];
7751 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7752}
7753#endif
7754
7755
7756/**
7757 * Stores a data oword (octo word, 256 bits), AVX \#GP(0) aligned.
7758 *
7759 * @returns Strict VBox status code.
7760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7761 * @param iSegReg The index of the segment register to use for
7762 * this access. The base and limits are checked.
7763 * @param GCPtrMem The address of the guest memory.
7764 * @param pu256Value Pointer to the value to store.
7765 */
7766VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7767{
7768 /* The lazy approach for now... */
7769 PRTUINT256U pu256Dst;
7770 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7771 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7772 if (rc == VINF_SUCCESS)
7773 {
7774 pu256Dst->au64[0] = pu256Value->au64[0];
7775 pu256Dst->au64[1] = pu256Value->au64[1];
7776 pu256Dst->au64[2] = pu256Value->au64[2];
7777 pu256Dst->au64[3] = pu256Value->au64[3];
7778 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7779 }
7780 return rc;
7781}
7782
7783
7784#ifdef IEM_WITH_SETJMP
7785/**
7786 * Stores a data oword (octo word, 256 bits), AVX aligned, longjmp on error.
7787 *
7789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7790 * @param iSegReg The index of the segment register to use for
7791 * this access. The base and limits are checked.
7792 * @param GCPtrMem The address of the guest memory.
7793 * @param pu256Value Pointer to the value to store.
7794 */
7795void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7796 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7797{
7798 /* The lazy approach for now... */
7799 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7800 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7801 pu256Dst->au64[0] = pu256Value->au64[0];
7802 pu256Dst->au64[1] = pu256Value->au64[1];
7803 pu256Dst->au64[2] = pu256Value->au64[2];
7804 pu256Dst->au64[3] = pu256Value->au64[3];
7805 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7806}
7807#endif
7808
7809
7810/**
7811 * Stores a descriptor register (sgdt, sidt).
7812 *
7813 * @returns Strict VBox status code.
7814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7815 * @param cbLimit The limit.
7816 * @param GCPtrBase The base address.
7817 * @param iSegReg The index of the segment register to use for
7818 * this access. The base and limits are checked.
7819 * @param GCPtrMem The address of the guest memory.
7820 */
7821VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7822{
7823 /*
7824     * The SIDT and SGDT instructions actually store the data using two
7825     * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7826     * do not respond to operand-size prefixes.
7827 */
7828 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7829 if (rcStrict == VINF_SUCCESS)
7830 {
7831 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7832 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7833 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7834 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7835 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7836 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7837 else
7838 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7839 }
7840 return rcStrict;
7841}
7842
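/*
 * Illustrative sketch, not used by IEM itself: what the two writes above
 * leave in guest memory when a 32-bit base is stored - a 2-byte limit
 * immediately followed by the base, with the undefined top base byte stored
 * as FFh by 286-class CPUs in 16-bit code.  The helper name is made up for
 * the example.
 */
static void iemSketchEncodeXdtr32(uint8_t *pbMem, uint16_t cbLimit, uint32_t uBase, bool fIs286Style)
{
    if (fIs286Style)
        uBase |= UINT32_C(0xff000000);      /* 286: undefined top byte comes out as FFh. */
    pbMem[0] = (uint8_t)(cbLimit      );
    pbMem[1] = (uint8_t)(cbLimit >>  8);
    pbMem[2] = (uint8_t)(uBase        );
    pbMem[3] = (uint8_t)(uBase   >>  8);
    pbMem[4] = (uint8_t)(uBase   >> 16);
    pbMem[5] = (uint8_t)(uBase   >> 24);
}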
7843
7844/**
7845 * Pushes a word onto the stack.
7846 *
7847 * @returns Strict VBox status code.
7848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7849 * @param u16Value The value to push.
7850 */
7851VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7852{
7853 /* Increment the stack pointer. */
7854 uint64_t uNewRsp;
7855 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7856
7857 /* Write the word the lazy way. */
7858 uint16_t *pu16Dst;
7859 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7860 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7861 if (rc == VINF_SUCCESS)
7862 {
7863 *pu16Dst = u16Value;
7864 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7865 }
7866
7867    /* Commit the new RSP value unless an access handler made trouble. */
7868 if (rc == VINF_SUCCESS)
7869 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7870
7871 return rc;
7872}
7873
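/*
 * Illustrative sketch, not used by IEM itself: the push pattern used above,
 * modelled on a plain byte array instead of guest memory - first compute the
 * new stack pointer, then attempt the write, and only commit the new stack
 * pointer when the write succeeded.  All names are made up for the example.
 */
static int iemSketchPushU16(uint8_t *pbStack, size_t cbStack, uint64_t *puRsp, uint16_t u16Value)
{
    uint64_t const uNewRsp = *puRsp - sizeof(uint16_t);         /* 1. pre-decrement, not yet committed */
    if (uNewRsp > cbStack || uNewRsp + sizeof(uint16_t) > cbStack)
        return -1;                                              /* 2. the "mapping" may fail (stand-in for #SS/#PF) */
    pbStack[uNewRsp]     = (uint8_t)(u16Value      );           /* 3. write the value */
    pbStack[uNewRsp + 1] = (uint8_t)(u16Value >> 8);
    *puRsp = uNewRsp;                                           /* 4. commit the stack pointer only on success */
    return 0;
}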
7874
7875/**
7876 * Pushes a dword onto the stack.
7877 *
7878 * @returns Strict VBox status code.
7879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7880 * @param u32Value The value to push.
7881 */
7882VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7883{
7884 /* Increment the stack pointer. */
7885 uint64_t uNewRsp;
7886 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7887
7888 /* Write the dword the lazy way. */
7889 uint32_t *pu32Dst;
7890 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
7891 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
7892 if (rc == VINF_SUCCESS)
7893 {
7894 *pu32Dst = u32Value;
7895 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7896 }
7897
7898    /* Commit the new RSP value unless an access handler made trouble. */
7899 if (rc == VINF_SUCCESS)
7900 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7901
7902 return rc;
7903}
7904
7905
7906/**
7907 * Pushes a dword segment register value onto the stack.
7908 *
7909 * @returns Strict VBox status code.
7910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7911 * @param u32Value The value to push.
7912 */
7913VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7914{
7915 /* Increment the stack pointer. */
7916 uint64_t uNewRsp;
7917 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7918
7919    /* The Intel docs talk about zero extending the selector register
7920       value. My actual Intel CPU here might be zero extending the value
7921 but it still only writes the lower word... */
7922 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7923     * happens when crossing a page boundary, is the high word checked
7924 * for write accessibility or not? Probably it is. What about segment limits?
7925 * It appears this behavior is also shared with trap error codes.
7926 *
7927     * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
7928     * on ancient hardware when it actually did change. */
7929 uint16_t *pu16Dst;
7930 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
7931 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
7932 if (rc == VINF_SUCCESS)
7933 {
7934 *pu16Dst = (uint16_t)u32Value;
7935 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7936 }
7937
7938    /* Commit the new RSP value unless an access handler made trouble. */
7939 if (rc == VINF_SUCCESS)
7940 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7941
7942 return rc;
7943}
7944
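/*
 * Illustrative sketch, not used by IEM itself: the observed PUSH-sreg
 * behavior described above, modelled on a byte array - a doubleword of stack
 * space is consumed, but only the low word of it is written, so the high
 * word keeps whatever was there before.  Names are made up for the example.
 */
static void iemSketchPushSRegAsU32(uint8_t *pbStack, uint64_t *puRsp, uint16_t uSel)
{
    *puRsp -= sizeof(uint32_t);                     /* reserve 4 bytes of stack space */
    pbStack[*puRsp]     = (uint8_t)(uSel      );    /* ... but store only the low 16 bits */
    pbStack[*puRsp + 1] = (uint8_t)(uSel >> 8);     /* bytes +2 and +3 are left untouched */
}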
7945
7946/**
7947 * Pushes a qword onto the stack.
7948 *
7949 * @returns Strict VBox status code.
7950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7951 * @param u64Value The value to push.
7952 */
7953VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7954{
7955 /* Increment the stack pointer. */
7956 uint64_t uNewRsp;
7957 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7958
7959    /* Write the qword the lazy way. */
7960 uint64_t *pu64Dst;
7961 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
7962 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
7963 if (rc == VINF_SUCCESS)
7964 {
7965 *pu64Dst = u64Value;
7966 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7967 }
7968
7969    /* Commit the new RSP value unless an access handler made trouble. */
7970 if (rc == VINF_SUCCESS)
7971 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7972
7973 return rc;
7974}
7975
7976
7977/**
7978 * Pops a word from the stack.
7979 *
7980 * @returns Strict VBox status code.
7981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7982 * @param pu16Value Where to store the popped value.
7983 */
7984VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7985{
7986 /* Increment the stack pointer. */
7987 uint64_t uNewRsp;
7988 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7989
7990    /* Read the word the lazy way. */
7991 uint16_t const *pu16Src;
7992 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
7993 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
7994 if (rc == VINF_SUCCESS)
7995 {
7996 *pu16Value = *pu16Src;
7997 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7998
7999 /* Commit the new RSP value. */
8000 if (rc == VINF_SUCCESS)
8001 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8002 }
8003
8004 return rc;
8005}
8006
8007
8008/**
8009 * Pops a dword from the stack.
8010 *
8011 * @returns Strict VBox status code.
8012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8013 * @param pu32Value Where to store the popped value.
8014 */
8015VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8016{
8017 /* Increment the stack pointer. */
8018 uint64_t uNewRsp;
8019 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8020
8021    /* Read the dword the lazy way. */
8022 uint32_t const *pu32Src;
8023 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8024 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8025 if (rc == VINF_SUCCESS)
8026 {
8027 *pu32Value = *pu32Src;
8028 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8029
8030 /* Commit the new RSP value. */
8031 if (rc == VINF_SUCCESS)
8032 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8033 }
8034
8035 return rc;
8036}
8037
8038
8039/**
8040 * Pops a qword from the stack.
8041 *
8042 * @returns Strict VBox status code.
8043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8044 * @param pu64Value Where to store the popped value.
8045 */
8046VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8047{
8048 /* Increment the stack pointer. */
8049 uint64_t uNewRsp;
8050 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8051
8052    /* Read the qword the lazy way. */
8053 uint64_t const *pu64Src;
8054 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8055 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8056 if (rc == VINF_SUCCESS)
8057 {
8058 *pu64Value = *pu64Src;
8059 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8060
8061 /* Commit the new RSP value. */
8062 if (rc == VINF_SUCCESS)
8063 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8064 }
8065
8066 return rc;
8067}
8068
8069
8070/**
8071 * Pushes a word onto the stack, using a temporary stack pointer.
8072 *
8073 * @returns Strict VBox status code.
8074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8075 * @param u16Value The value to push.
8076 * @param pTmpRsp Pointer to the temporary stack pointer.
8077 */
8078VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8079{
8080 /* Increment the stack pointer. */
8081 RTUINT64U NewRsp = *pTmpRsp;
8082 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8083
8084 /* Write the word the lazy way. */
8085 uint16_t *pu16Dst;
8086 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8087 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8088 if (rc == VINF_SUCCESS)
8089 {
8090 *pu16Dst = u16Value;
8091 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8092 }
8093
8094    /* Commit the new RSP value unless an access handler made trouble. */
8095 if (rc == VINF_SUCCESS)
8096 *pTmpRsp = NewRsp;
8097
8098 return rc;
8099}
8100
8101
8102/**
8103 * Pushes a dword onto the stack, using a temporary stack pointer.
8104 *
8105 * @returns Strict VBox status code.
8106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8107 * @param u32Value The value to push.
8108 * @param pTmpRsp Pointer to the temporary stack pointer.
8109 */
8110VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8111{
8112 /* Increment the stack pointer. */
8113 RTUINT64U NewRsp = *pTmpRsp;
8114 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8115
8116    /* Write the dword the lazy way. */
8117 uint32_t *pu32Dst;
8118 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8119 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8120 if (rc == VINF_SUCCESS)
8121 {
8122 *pu32Dst = u32Value;
8123 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8124 }
8125
8126    /* Commit the new RSP value unless an access handler made trouble. */
8127 if (rc == VINF_SUCCESS)
8128 *pTmpRsp = NewRsp;
8129
8130 return rc;
8131}
8132
8133
8134/**
8135 * Pushes a qword onto the stack, using a temporary stack pointer.
8136 *
8137 * @returns Strict VBox status code.
8138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8139 * @param u64Value The value to push.
8140 * @param pTmpRsp Pointer to the temporary stack pointer.
8141 */
8142VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8143{
8144 /* Increment the stack pointer. */
8145 RTUINT64U NewRsp = *pTmpRsp;
8146 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8147
8148    /* Write the qword the lazy way. */
8149 uint64_t *pu64Dst;
8150 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8151 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8152 if (rc == VINF_SUCCESS)
8153 {
8154 *pu64Dst = u64Value;
8155 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8156 }
8157
8158    /* Commit the new RSP value unless an access handler made trouble. */
8159 if (rc == VINF_SUCCESS)
8160 *pTmpRsp = NewRsp;
8161
8162 return rc;
8163}
8164
8165
8166/**
8167 * Pops a word from the stack, using a temporary stack pointer.
8168 *
8169 * @returns Strict VBox status code.
8170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8171 * @param pu16Value Where to store the popped value.
8172 * @param pTmpRsp Pointer to the temporary stack pointer.
8173 */
8174VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8175{
8176 /* Increment the stack pointer. */
8177 RTUINT64U NewRsp = *pTmpRsp;
8178 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8179
8180    /* Read the word the lazy way. */
8181 uint16_t const *pu16Src;
8182 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8183 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8184 if (rc == VINF_SUCCESS)
8185 {
8186 *pu16Value = *pu16Src;
8187 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8188
8189 /* Commit the new RSP value. */
8190 if (rc == VINF_SUCCESS)
8191 *pTmpRsp = NewRsp;
8192 }
8193
8194 return rc;
8195}
8196
8197
8198/**
8199 * Pops a dword from the stack, using a temporary stack pointer.
8200 *
8201 * @returns Strict VBox status code.
8202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8203 * @param pu32Value Where to store the popped value.
8204 * @param pTmpRsp Pointer to the temporary stack pointer.
8205 */
8206VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8207{
8208 /* Increment the stack pointer. */
8209 RTUINT64U NewRsp = *pTmpRsp;
8210 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8211
8212    /* Read the dword the lazy way. */
8213 uint32_t const *pu32Src;
8214 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8215 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8216 if (rc == VINF_SUCCESS)
8217 {
8218 *pu32Value = *pu32Src;
8219 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8220
8221 /* Commit the new RSP value. */
8222 if (rc == VINF_SUCCESS)
8223 *pTmpRsp = NewRsp;
8224 }
8225
8226 return rc;
8227}
8228
8229
8230/**
8231 * Pops a qword from the stack, using a temporary stack pointer.
8232 *
8233 * @returns Strict VBox status code.
8234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8235 * @param pu64Value Where to store the popped value.
8236 * @param pTmpRsp Pointer to the temporary stack pointer.
8237 */
8238VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8239{
8240 /* Increment the stack pointer. */
8241 RTUINT64U NewRsp = *pTmpRsp;
8242 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8243
8244    /* Read the qword the lazy way. */
8245 uint64_t const *pu64Src;
8246 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8247 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8248 if (rcStrict == VINF_SUCCESS)
8249 {
8250 *pu64Value = *pu64Src;
8251 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8252
8253 /* Commit the new RSP value. */
8254 if (rcStrict == VINF_SUCCESS)
8255 *pTmpRsp = NewRsp;
8256 }
8257
8258 return rcStrict;
8259}
8260
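/*
 * Illustrative sketch, not used by IEM itself: how the *Ex push/pop variants
 * above are typically strung together - the pushes work against a local copy
 * of RSP and the guest register is only updated once every access has
 * succeeded.  The helper name and the two pushed values are made up for the
 * example.
 */
static VBOXSTRICTRC iemSketchPushTwoWords(PVMCPUCC pVCpu, uint16_t uFirst, uint16_t uSecond)
{
    RTUINT64U TmpRsp;
    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;                      /* work on a temporary RSP copy */
    VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, uFirst, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU16Ex(pVCpu, uSecond, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = TmpRsp.u;                  /* commit only if both pushes worked */
    return rcStrict;
}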
8261
8262/**
8263 * Begin a special stack push (used by interrupts, exceptions and such).
8264 *
8265 * This will raise \#SS or \#PF if appropriate.
8266 *
8267 * @returns Strict VBox status code.
8268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8269 * @param cbMem The number of bytes to push onto the stack.
8270 * @param cbAlign The alignment mask (7, 3, 1).
8271 * @param ppvMem Where to return the pointer to the stack memory.
8272 * As with the other memory functions this could be
8273 * direct access or bounce buffered access, so
8274 * don't commit any registers until the commit call
8275 * succeeds.
8276 * @param puNewRsp Where to return the new RSP value. This must be
8277 * passed unchanged to
8278 * iemMemStackPushCommitSpecial().
8279 */
8280VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8281 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8282{
8283 Assert(cbMem < UINT8_MAX);
8284 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8285 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8286 IEM_ACCESS_STACK_W, cbAlign);
8287}
8288
8289
8290/**
8291 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8292 *
8293 * This will update the rSP.
8294 *
8295 * @returns Strict VBox status code.
8296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8297 * @param pvMem The pointer returned by
8298 * iemMemStackPushBeginSpecial().
8299 * @param uNewRsp The new RSP value returned by
8300 * iemMemStackPushBeginSpecial().
8301 */
8302VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8303{
8304 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8305 if (rcStrict == VINF_SUCCESS)
8306 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8307 return rcStrict;
8308}
8309
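/*
 * Illustrative sketch, not used by IEM itself: the intended pairing of
 * iemMemStackPushBeginSpecial and iemMemStackPushCommitSpecial - map the
 * stack space, fill it in, then commit both the memory and the new RSP in
 * one go.  The 8-byte frame and the helper name are made up for the example.
 */
static VBOXSTRICTRC iemSketchPushU64Special(PVMCPUCC pVCpu, uint64_t u64Frame)
{
    void    *pvStackFrame;
    uint64_t uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(u64Frame), sizeof(u64Frame) - 1,
                                                        &pvStackFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                                /* propagate #SS/#PF and friends */
    *(uint64_t *)pvStackFrame = u64Frame;               /* fill in the mapped stack bytes */
    return iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp); /* unmap + update RSP */
}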
8310
8311/**
8312 * Begin a special stack pop (used by iret, retf and such).
8313 *
8314 * This will raise \#SS or \#PF if appropriate.
8315 *
8316 * @returns Strict VBox status code.
8317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8318 * @param cbMem The number of bytes to pop from the stack.
8319 * @param cbAlign The alignment mask (7, 3, 1).
8320 * @param ppvMem Where to return the pointer to the stack memory.
8321 * @param puNewRsp Where to return the new RSP value. This must be
8322 * assigned to CPUMCTX::rsp manually some time
8323 * after iemMemStackPopDoneSpecial() has been
8324 * called.
8325 */
8326VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8327 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8328{
8329 Assert(cbMem < UINT8_MAX);
8330 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8331 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8332}
8333
8334
8335/**
8336 * Continue a special stack pop (used by iret and retf), for the purpose of
8337 * retrieving a new stack pointer.
8338 *
8339 * This will raise \#SS or \#PF if appropriate.
8340 *
8341 * @returns Strict VBox status code.
8342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8343 * @param off Offset from the top of the stack. This is zero
8344 * except in the retf case.
8345 * @param cbMem The number of bytes to pop from the stack.
8346 * @param ppvMem Where to return the pointer to the stack memory.
8347 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8348 * return this because all use of this function is
8349 * to retrieve a new value and anything we return
8350 * here would be discarded.)
8351 */
8352VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8353 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8354{
8355 Assert(cbMem < UINT8_MAX);
8356
8357    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8358 RTGCPTR GCPtrTop;
8359 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8360 GCPtrTop = uCurNewRsp;
8361 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8362 GCPtrTop = (uint32_t)uCurNewRsp;
8363 else
8364 GCPtrTop = (uint16_t)uCurNewRsp;
8365
8366 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8367 0 /* checked in iemMemStackPopBeginSpecial */);
8368}
8369
8370
8371/**
8372 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8373 * iemMemStackPopContinueSpecial).
8374 *
8375 * The caller will manually commit the rSP.
8376 *
8377 * @returns Strict VBox status code.
8378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8379 * @param pvMem The pointer returned by
8380 * iemMemStackPopBeginSpecial() or
8381 * iemMemStackPopContinueSpecial().
8382 */
8383VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8384{
8385 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8386}
8387
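/*
 * Illustrative sketch, not used by IEM itself: the intended pairing of
 * iemMemStackPopBeginSpecial and iemMemStackPopDoneSpecial - map the stack
 * bytes, copy out what is needed, unmap, and only then assign the new RSP by
 * hand as the comments above require.  The helper name and the popped
 * quantity are made up for the example.
 */
static VBOXSTRICTRC iemSketchPopU64Special(PVMCPUCC pVCpu, uint64_t *pu64Value)
{
    void const *pvStackMem;
    uint64_t    uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(*pu64Value), sizeof(*pu64Value) - 1,
                                                       &pvStackMem, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *pu64Value = *(uint64_t const *)pvStackMem;
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvStackMem);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;               /* the manual RSP commit */
    return rcStrict;
}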
8388
8389/**
8390 * Fetches a system table byte.
8391 *
8392 * @returns Strict VBox status code.
8393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8394 * @param pbDst Where to return the byte.
8395 * @param iSegReg The index of the segment register to use for
8396 * this access. The base and limits are checked.
8397 * @param GCPtrMem The address of the guest memory.
8398 */
8399VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8400{
8401 /* The lazy approach for now... */
8402 uint8_t const *pbSrc;
8403 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8404 if (rc == VINF_SUCCESS)
8405 {
8406 *pbDst = *pbSrc;
8407 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8408 }
8409 return rc;
8410}
8411
8412
8413/**
8414 * Fetches a system table word.
8415 *
8416 * @returns Strict VBox status code.
8417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8418 * @param pu16Dst Where to return the word.
8419 * @param iSegReg The index of the segment register to use for
8420 * this access. The base and limits are checked.
8421 * @param GCPtrMem The address of the guest memory.
8422 */
8423VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8424{
8425 /* The lazy approach for now... */
8426 uint16_t const *pu16Src;
8427 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8428 if (rc == VINF_SUCCESS)
8429 {
8430 *pu16Dst = *pu16Src;
8431 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8432 }
8433 return rc;
8434}
8435
8436
8437/**
8438 * Fetches a system table dword.
8439 *
8440 * @returns Strict VBox status code.
8441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8442 * @param pu32Dst Where to return the dword.
8443 * @param iSegReg The index of the segment register to use for
8444 * this access. The base and limits are checked.
8445 * @param GCPtrMem The address of the guest memory.
8446 */
8447VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8448{
8449 /* The lazy approach for now... */
8450 uint32_t const *pu32Src;
8451 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8452 if (rc == VINF_SUCCESS)
8453 {
8454 *pu32Dst = *pu32Src;
8455 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8456 }
8457 return rc;
8458}
8459
8460
8461/**
8462 * Fetches a system table qword.
8463 *
8464 * @returns Strict VBox status code.
8465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8466 * @param pu64Dst Where to return the qword.
8467 * @param iSegReg The index of the segment register to use for
8468 * this access. The base and limits are checked.
8469 * @param GCPtrMem The address of the guest memory.
8470 */
8471VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8472{
8473 /* The lazy approach for now... */
8474 uint64_t const *pu64Src;
8475 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8476 if (rc == VINF_SUCCESS)
8477 {
8478 *pu64Dst = *pu64Src;
8479 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8480 }
8481 return rc;
8482}
8483
8484
8485/**
8486 * Fetches a descriptor table entry with caller specified error code.
8487 *
8488 * @returns Strict VBox status code.
8489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8490 * @param pDesc Where to return the descriptor table entry.
8491 * @param uSel The selector which table entry to fetch.
8492 * @param uXcpt The exception to raise on table lookup error.
8493 * @param uErrorCode The error code associated with the exception.
8494 */
8495static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8496 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8497{
8498 AssertPtr(pDesc);
8499 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8500
8501 /** @todo did the 286 require all 8 bytes to be accessible? */
8502 /*
8503 * Get the selector table base and check bounds.
8504 */
8505 RTGCPTR GCPtrBase;
8506 if (uSel & X86_SEL_LDT)
8507 {
8508 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8509 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8510 {
8511 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8512 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8513 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8514 uErrorCode, 0);
8515 }
8516
8517 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8518 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8519 }
8520 else
8521 {
8522 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8523 {
8524 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8525 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8526 uErrorCode, 0);
8527 }
8528 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8529 }
8530
8531 /*
8532 * Read the legacy descriptor and maybe the long mode extensions if
8533 * required.
8534 */
8535 VBOXSTRICTRC rcStrict;
8536 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8537 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8538 else
8539 {
8540 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8541 if (rcStrict == VINF_SUCCESS)
8542 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8543 if (rcStrict == VINF_SUCCESS)
8544 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8545 if (rcStrict == VINF_SUCCESS)
8546 pDesc->Legacy.au16[3] = 0;
8547 else
8548 return rcStrict;
8549 }
8550
8551 if (rcStrict == VINF_SUCCESS)
8552 {
8553 if ( !IEM_IS_LONG_MODE(pVCpu)
8554 || pDesc->Legacy.Gen.u1DescType)
8555 pDesc->Long.au64[1] = 0;
8556 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8557 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8558 else
8559 {
8560 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8561 /** @todo is this the right exception? */
8562 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8563 }
8564 }
8565 return rcStrict;
8566}
8567
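/*
 * Illustrative sketch, not used by IEM itself: the selector arithmetic behind
 * the bounds check above, on plain integers - the TI bit picks the LDT or the
 * GDT, the low three bits (TI + RPL) are masked off to form the byte offset
 * into the table, and the offset of the last descriptor byte must not exceed
 * the table limit.  The helper name is made up for the example.
 */
static int iemSketchSelDescAddr(uint16_t uSel, uint64_t uGdtBase, uint16_t cbGdtLimit,
                                uint64_t uLdtBase, uint32_t cbLdtLimit, uint64_t *puDescAddr)
{
    uint16_t const offDesc = (uint16_t)(uSel & ~(uint16_t)7);   /* strip TI + RPL */
    if (uSel & 4 /* TI: 1 = LDT */)
    {
        if ((uint32_t)offDesc + 7 > cbLdtLimit)
            return -1;                                          /* out of bounds -> exception */
        *puDescAddr = uLdtBase + offDesc;
    }
    else
    {
        if ((uint32_t)offDesc + 7 > cbGdtLimit)
            return -1;
        *puDescAddr = uGdtBase + offDesc;
    }
    return 0;
}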
8568
8569/**
8570 * Fetches a descriptor table entry.
8571 *
8572 * @returns Strict VBox status code.
8573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8574 * @param pDesc Where to return the descriptor table entry.
8575 * @param uSel The selector which table entry to fetch.
8576 * @param uXcpt The exception to raise on table lookup error.
8577 */
8578VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8579{
8580 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8581}
8582
8583
8584/**
8585 * Marks the selector descriptor as accessed (only non-system descriptors).
8586 *
8587 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8588 * will therefore skip the limit checks.
8589 *
8590 * @returns Strict VBox status code.
8591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8592 * @param uSel The selector.
8593 */
8594VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8595{
8596 /*
8597 * Get the selector table base and calculate the entry address.
8598 */
8599 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8600 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8601 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8602 GCPtr += uSel & X86_SEL_MASK;
8603
8604 /*
8605 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8606 * ugly stuff to avoid this. This will make sure it's an atomic access
8607     * as well as more or less remove any question about 8-bit or 32-bit accesses.
8608 */
8609 VBOXSTRICTRC rcStrict;
8610 uint32_t volatile *pu32;
8611 if ((GCPtr & 3) == 0)
8612 {
8613 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8614 GCPtr += 2 + 2;
8615 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8616 if (rcStrict != VINF_SUCCESS)
8617 return rcStrict;
8618        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8619 }
8620 else
8621 {
8622 /* The misaligned GDT/LDT case, map the whole thing. */
8623 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8624 if (rcStrict != VINF_SUCCESS)
8625 return rcStrict;
8626 switch ((uintptr_t)pu32 & 3)
8627 {
8628 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8629 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8630 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8631 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8632 }
8633 }
8634
8635 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8636}
8637
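/*
 * Illustrative sketch, not used by IEM itself: where the "accessed" bit lives
 * and why the code above fiddles with the mapping alignment.  Bit 40 of the
 * 8-byte descriptor is bit 0 of the type field in byte 5; the atomic helpers
 * just need a pointer/bit-index pair that addresses that same byte.  The
 * helper name is made up for the example (and it is not atomic).
 */
static void iemSketchMarkDescAccessed(uint8_t *pbDesc /* 8 bytes */)
{
    pbDesc[5] |= 0x01;      /* descriptor bit 40 = byte 5, bit 0 = X86_SEL_TYPE_ACCESSED */
}
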
8638/** @} */
8639
8640/** @name Opcode Helpers.
8641 * @{
8642 */
8643
8644/**
8645 * Calculates the effective address of a ModR/M memory operand.
8646 *
8647 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8648 *
8649 * @return Strict VBox status code.
8650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8651 * @param bRm The ModRM byte.
8652 * @param cbImm The size of any immediate following the
8653 * effective address opcode bytes. Important for
8654 * RIP relative addressing.
8655 * @param pGCPtrEff Where to return the effective address.
8656 */
8657VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8658{
8659 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8660# define SET_SS_DEF() \
8661 do \
8662 { \
8663 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8664 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8665 } while (0)
8666
8667 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8668 {
8669/** @todo Check the effective address size crap! */
8670 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8671 {
8672 uint16_t u16EffAddr;
8673
8674 /* Handle the disp16 form with no registers first. */
8675 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8676 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8677 else
8678 {
8679                /* Get the displacement. */
8680 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8681 {
8682 case 0: u16EffAddr = 0; break;
8683 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8684 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8685 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8686 }
8687
8688 /* Add the base and index registers to the disp. */
8689 switch (bRm & X86_MODRM_RM_MASK)
8690 {
8691 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8692 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8693 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8694 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8695 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8696 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8697 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8698 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8699 }
8700 }
8701
8702 *pGCPtrEff = u16EffAddr;
8703 }
8704 else
8705 {
8706 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8707 uint32_t u32EffAddr;
8708
8709 /* Handle the disp32 form with no registers first. */
8710 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8711 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8712 else
8713 {
8714 /* Get the register (or SIB) value. */
8715 switch ((bRm & X86_MODRM_RM_MASK))
8716 {
8717 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8718 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8719 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8720 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8721 case 4: /* SIB */
8722 {
8723 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8724
8725 /* Get the index and scale it. */
8726 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8727 {
8728 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8729 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8730 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8731 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8732 case 4: u32EffAddr = 0; /*none */ break;
8733 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8734 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8735 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8736 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8737 }
8738 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8739
8740 /* add base */
8741 switch (bSib & X86_SIB_BASE_MASK)
8742 {
8743 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8744 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8745 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8746 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8747 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8748 case 5:
8749 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8750 {
8751 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8752 SET_SS_DEF();
8753 }
8754 else
8755 {
8756 uint32_t u32Disp;
8757 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8758 u32EffAddr += u32Disp;
8759 }
8760 break;
8761 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8762 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8763 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8764 }
8765 break;
8766 }
8767 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8768 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8769 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8770 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8771 }
8772
8773 /* Get and add the displacement. */
8774 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8775 {
8776 case 0:
8777 break;
8778 case 1:
8779 {
8780 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8781 u32EffAddr += i8Disp;
8782 break;
8783 }
8784 case 2:
8785 {
8786 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8787 u32EffAddr += u32Disp;
8788 break;
8789 }
8790 default:
8791 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8792 }
8793
8794 }
8795 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8796 *pGCPtrEff = u32EffAddr;
8797 else
8798 {
8799 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8800 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8801 }
8802 }
8803 }
8804 else
8805 {
8806 uint64_t u64EffAddr;
8807
8808 /* Handle the rip+disp32 form with no registers first. */
8809 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8810 {
8811 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8812 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8813 }
8814 else
8815 {
8816 /* Get the register (or SIB) value. */
8817 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8818 {
8819 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8820 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8821 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8822 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8823 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8824 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8825 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8826 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8827 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8828 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8829 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8830 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8831 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8832 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8833 /* SIB */
8834 case 4:
8835 case 12:
8836 {
8837 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8838
8839 /* Get the index and scale it. */
8840 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8841 {
8842 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8843 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8844 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8845 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8846 case 4: u64EffAddr = 0; /*none */ break;
8847 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8848 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8849 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8850 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8851 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8852 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8853 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8854 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8855 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8856 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8857 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8858 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8859 }
8860 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8861
8862 /* add base */
8863 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8864 {
8865 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8866 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8867 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8868 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8869 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8870 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8871 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8872 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8873 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8874 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8875 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8876 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8877 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8878 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8879 /* complicated encodings */
8880 case 5:
8881 case 13:
8882 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8883 {
8884 if (!pVCpu->iem.s.uRexB)
8885 {
8886 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8887 SET_SS_DEF();
8888 }
8889 else
8890 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8891 }
8892 else
8893 {
8894 uint32_t u32Disp;
8895 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8896 u64EffAddr += (int32_t)u32Disp;
8897 }
8898 break;
8899 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8900 }
8901 break;
8902 }
8903 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8904 }
8905
8906 /* Get and add the displacement. */
8907 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8908 {
8909 case 0:
8910 break;
8911 case 1:
8912 {
8913 int8_t i8Disp;
8914 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8915 u64EffAddr += i8Disp;
8916 break;
8917 }
8918 case 2:
8919 {
8920 uint32_t u32Disp;
8921 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8922 u64EffAddr += (int32_t)u32Disp;
8923 break;
8924 }
8925 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8926 }
8927
8928 }
8929
8930 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8931 *pGCPtrEff = u64EffAddr;
8932 else
8933 {
8934 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8935 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8936 }
8937 }
8938
8939 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8940 return VINF_SUCCESS;
8941}
8942
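/*
 * Illustrative sketch, not used by IEM itself: the classic 16-bit ModR/M
 * effective address table implemented over plain register values, matching
 * the 16-bit branch of the function above (the bare disp16 form, mod=00 with
 * rm=110, is assumed to be handled by the caller here, and disp8 is assumed
 * to arrive already sign-extended).  Names are made up for the example.
 */
static uint16_t iemSketchCalc16BitEffAddr(uint8_t bRm, uint16_t u16Disp,
                                          uint16_t bx, uint16_t bp, uint16_t si, uint16_t di)
{
    uint16_t u16EffAddr = u16Disp;
    switch (bRm & 7 /* X86_MODRM_RM_MASK */)
    {
        case 0: u16EffAddr += bx + si; break;
        case 1: u16EffAddr += bx + di; break;
        case 2: u16EffAddr += bp + si; break;       /* bp-based forms default to SS */
        case 3: u16EffAddr += bp + di; break;
        case 4: u16EffAddr += si;      break;
        case 5: u16EffAddr += di;      break;
        case 6: u16EffAddr += bp;      break;       /* (mod != 00 only) */
        case 7: u16EffAddr += bx;      break;
    }
    return u16EffAddr;                              /* 16-bit wrap-around is intentional */
}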
8943
8944/**
8945 * Calculates the effective address of a ModR/M memory operand.
8946 *
8947 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8948 *
8949 * @return Strict VBox status code.
8950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8951 * @param bRm The ModRM byte.
8952 * @param cbImm The size of any immediate following the
8953 * effective address opcode bytes. Important for
8954 * RIP relative addressing.
8955 * @param pGCPtrEff Where to return the effective address.
8956 * @param offRsp RSP displacement.
8957 */
8958VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8959{
8960    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8961# define SET_SS_DEF() \
8962 do \
8963 { \
8964 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8965 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8966 } while (0)
8967
8968 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8969 {
8970/** @todo Check the effective address size crap! */
8971 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8972 {
8973 uint16_t u16EffAddr;
8974
8975 /* Handle the disp16 form with no registers first. */
8976 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8977 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8978 else
8979 {
 8980                /* Get the displacement. */
8981 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8982 {
8983 case 0: u16EffAddr = 0; break;
8984 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8985 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8986 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8987 }
8988
8989 /* Add the base and index registers to the disp. */
8990 switch (bRm & X86_MODRM_RM_MASK)
8991 {
8992 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8993 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8994 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8995 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8996 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8997 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8998 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8999 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9000 }
9001 }
9002
9003 *pGCPtrEff = u16EffAddr;
9004 }
9005 else
9006 {
9007 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9008 uint32_t u32EffAddr;
9009
9010 /* Handle the disp32 form with no registers first. */
9011 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9012 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9013 else
9014 {
9015 /* Get the register (or SIB) value. */
9016 switch ((bRm & X86_MODRM_RM_MASK))
9017 {
9018 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9019 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9020 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9021 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9022 case 4: /* SIB */
9023 {
9024 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9025
9026 /* Get the index and scale it. */
9027 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9028 {
9029 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9030 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9031 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9032 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9033 case 4: u32EffAddr = 0; /*none */ break;
9034 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9035 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9036 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9037 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9038 }
9039 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9040
9041 /* add base */
9042 switch (bSib & X86_SIB_BASE_MASK)
9043 {
9044 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9045 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9046 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9047 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9048 case 4:
9049 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
9050 SET_SS_DEF();
9051 break;
9052 case 5:
9053 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9054 {
9055 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9056 SET_SS_DEF();
9057 }
9058 else
9059 {
9060 uint32_t u32Disp;
9061 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9062 u32EffAddr += u32Disp;
9063 }
9064 break;
9065 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9066 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9067 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9068 }
9069 break;
9070 }
9071 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9072 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9073 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9074 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9075 }
9076
9077 /* Get and add the displacement. */
9078 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9079 {
9080 case 0:
9081 break;
9082 case 1:
9083 {
9084 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9085 u32EffAddr += i8Disp;
9086 break;
9087 }
9088 case 2:
9089 {
9090 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9091 u32EffAddr += u32Disp;
9092 break;
9093 }
9094 default:
9095 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9096 }
9097
9098 }
9099 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9100 *pGCPtrEff = u32EffAddr;
9101 else
9102 {
9103 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9104 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9105 }
9106 }
9107 }
9108 else
9109 {
9110 uint64_t u64EffAddr;
9111
9112 /* Handle the rip+disp32 form with no registers first. */
9113 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9114 {
9115 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9116 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9117 }
9118 else
9119 {
9120 /* Get the register (or SIB) value. */
9121 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9122 {
9123 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9124 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9125 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9126 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9127 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9128 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9129 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9130 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9131 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9132 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9133 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9134 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9135 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9136 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9137 /* SIB */
9138 case 4:
9139 case 12:
9140 {
9141 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9142
9143 /* Get the index and scale it. */
9144 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9145 {
9146 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9147 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9148 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9149 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9150 case 4: u64EffAddr = 0; /*none */ break;
9151 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9152 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9153 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9154 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9155 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9156 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9157 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9158 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9159 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9160 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9161 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9162 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9163 }
9164 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9165
9166 /* add base */
9167 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9168 {
9169 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9170 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9171 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9172 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9173 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
9174 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9175 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9176 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9177 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9178 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9179 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9180 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9181 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9182 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
 9183                        /* complicated encodings: no base register, just disp32, when mod=0; otherwise rBP/r13 */
9184 case 5:
9185 case 13:
9186 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9187 {
9188 if (!pVCpu->iem.s.uRexB)
9189 {
9190 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9191 SET_SS_DEF();
9192 }
9193 else
9194 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9195 }
9196 else
9197 {
9198 uint32_t u32Disp;
9199 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9200 u64EffAddr += (int32_t)u32Disp;
9201 }
9202 break;
9203 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9204 }
9205 break;
9206 }
9207 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9208 }
9209
9210 /* Get and add the displacement. */
9211 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9212 {
9213 case 0:
9214 break;
9215 case 1:
9216 {
9217 int8_t i8Disp;
9218 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9219 u64EffAddr += i8Disp;
9220 break;
9221 }
9222 case 2:
9223 {
9224 uint32_t u32Disp;
9225 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9226 u64EffAddr += (int32_t)u32Disp;
9227 break;
9228 }
9229 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9230 }
9231
9232 }
9233
9234 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9235 *pGCPtrEff = u64EffAddr;
9236 else
9237 {
9238 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9239 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9240 }
9241 }
9242
 9243    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
9244 return VINF_SUCCESS;
9245}
9246
9247
9248#ifdef IEM_WITH_SETJMP
9249/**
9250 * Calculates the effective address of a ModR/M memory operand.
9251 *
9252 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9253 *
9254 * May longjmp on internal error.
9255 *
9256 * @return The effective address.
9257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9258 * @param bRm The ModRM byte.
9259 * @param cbImm The size of any immediate following the
9260 * effective address opcode bytes. Important for
9261 * RIP relative addressing.
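 *
 * A minimal usage sketch (hypothetical caller; only for IEM_WITH_SETJMP builds
 * since errors are reported via longjmp rather than a status code):
 * @code
 *     RTGCPTR const GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0); // cbImm=0
 * @endcode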
9262 */
9263RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) IEM_NOEXCEPT_MAY_LONGJMP
9264{
9265 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9266# define SET_SS_DEF() \
9267 do \
9268 { \
9269 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9270 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9271 } while (0)
9272
9273 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9274 {
9275/** @todo Check the effective address size crap! */
9276 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9277 {
9278 uint16_t u16EffAddr;
9279
9280 /* Handle the disp16 form with no registers first. */
9281 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9282 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9283 else
9284 {
 9285                /* Get the displacement. */
9286 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9287 {
9288 case 0: u16EffAddr = 0; break;
9289 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9290 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9291 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9292 }
9293
9294 /* Add the base and index registers to the disp. */
9295 switch (bRm & X86_MODRM_RM_MASK)
9296 {
9297 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9298 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9299 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9300 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9301 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9302 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9303 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9304 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9305 }
9306 }
9307
9308 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9309 return u16EffAddr;
9310 }
9311
9312 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9313 uint32_t u32EffAddr;
9314
9315 /* Handle the disp32 form with no registers first. */
9316 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9317 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9318 else
9319 {
9320 /* Get the register (or SIB) value. */
9321 switch ((bRm & X86_MODRM_RM_MASK))
9322 {
9323 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9324 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9325 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9326 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9327 case 4: /* SIB */
9328 {
9329 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9330
9331 /* Get the index and scale it. */
9332 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9333 {
9334 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9335 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9336 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9337 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9338 case 4: u32EffAddr = 0; /*none */ break;
9339 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9340 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9341 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9342 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9343 }
9344 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9345
9346 /* add base */
9347 switch (bSib & X86_SIB_BASE_MASK)
9348 {
9349 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9350 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9351 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9352 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9353 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9354 case 5:
9355 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9356 {
9357 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9358 SET_SS_DEF();
9359 }
9360 else
9361 {
9362 uint32_t u32Disp;
9363 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9364 u32EffAddr += u32Disp;
9365 }
9366 break;
9367 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9368 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9369 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9370 }
9371 break;
9372 }
9373 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9374 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9375 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9376 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9377 }
9378
9379 /* Get and add the displacement. */
9380 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9381 {
9382 case 0:
9383 break;
9384 case 1:
9385 {
9386 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9387 u32EffAddr += i8Disp;
9388 break;
9389 }
9390 case 2:
9391 {
9392 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9393 u32EffAddr += u32Disp;
9394 break;
9395 }
9396 default:
9397 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9398 }
9399 }
9400
9401 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9402 {
9403 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9404 return u32EffAddr;
9405 }
9406 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9407 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9408 return u32EffAddr & UINT16_MAX;
9409 }
9410
9411 uint64_t u64EffAddr;
9412
9413 /* Handle the rip+disp32 form with no registers first. */
9414 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9415 {
9416 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9417 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9418 }
9419 else
9420 {
9421 /* Get the register (or SIB) value. */
9422 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9423 {
9424 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9425 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9426 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9427 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9428 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9429 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9430 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9431 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9432 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9433 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9434 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9435 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9436 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9437 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9438 /* SIB */
9439 case 4:
9440 case 12:
9441 {
9442 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9443
9444 /* Get the index and scale it. */
9445 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9446 {
9447 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9448 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9449 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9450 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9451 case 4: u64EffAddr = 0; /*none */ break;
9452 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9453 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9454 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9455 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9456 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9457 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9458 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9459 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9460 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9461 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9462 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9463 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9464 }
9465 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9466
9467 /* add base */
9468 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9469 {
9470 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9471 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9472 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9473 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9474 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9475 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9476 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9477 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9478 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9479 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9480 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9481 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9482 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9483 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
 9484                        /* complicated encodings: no base register, just disp32, when mod=0; otherwise rBP/r13 */
9485 case 5:
9486 case 13:
9487 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9488 {
9489 if (!pVCpu->iem.s.uRexB)
9490 {
9491 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9492 SET_SS_DEF();
9493 }
9494 else
9495 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9496 }
9497 else
9498 {
9499 uint32_t u32Disp;
9500 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9501 u64EffAddr += (int32_t)u32Disp;
9502 }
9503 break;
9504 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9505 }
9506 break;
9507 }
9508 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9509 }
9510
9511 /* Get and add the displacement. */
9512 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9513 {
9514 case 0:
9515 break;
9516 case 1:
9517 {
9518 int8_t i8Disp;
9519 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9520 u64EffAddr += i8Disp;
9521 break;
9522 }
9523 case 2:
9524 {
9525 uint32_t u32Disp;
9526 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9527 u64EffAddr += (int32_t)u32Disp;
9528 break;
9529 }
9530 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9531 }
9532
9533 }
9534
9535 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9536 {
9537 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9538 return u64EffAddr;
9539 }
9540 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9541 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9542 return u64EffAddr & UINT32_MAX;
9543}
9544#endif /* IEM_WITH_SETJMP */
9545
9546/** @} */
9547
9548
9549#ifdef LOG_ENABLED
9550/**
9551 * Logs the current instruction.
9552 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9553 * @param fSameCtx Set if we have the same context information as the VMM,
9554 * clear if we may have already executed an instruction in
9555 * our debug context. When clear, we assume IEMCPU holds
9556 * valid CPU mode info.
9557 *
9558 * The @a fSameCtx parameter is now misleading and obsolete.
9559 * @param pszFunction The IEM function doing the execution.
9560 */
9561static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9562{
9563# ifdef IN_RING3
9564 if (LogIs2Enabled())
9565 {
9566 char szInstr[256];
9567 uint32_t cbInstr = 0;
9568 if (fSameCtx)
9569 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9570 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9571 szInstr, sizeof(szInstr), &cbInstr);
9572 else
9573 {
9574 uint32_t fFlags = 0;
9575 switch (pVCpu->iem.s.enmCpuMode)
9576 {
9577 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9578 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9579 case IEMMODE_16BIT:
9580 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9581 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9582 else
9583 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9584 break;
9585 }
9586 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9587 szInstr, sizeof(szInstr), &cbInstr);
9588 }
9589
9590 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9591 Log2(("**** %s\n"
9592 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9593 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9594 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9595 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9596 " %s\n"
9597 , pszFunction,
9598 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9599 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9600 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9601 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9602 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9603 szInstr));
9604
9605 if (LogIs3Enabled())
9606 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9607 }
9608 else
9609# endif
9610 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9611 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9612 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9613}
9614#endif /* LOG_ENABLED */
9615
9616
9617#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9618/**
9619 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9620 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9621 *
9622 * @returns Modified rcStrict.
9623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9624 * @param rcStrict The instruction execution status.
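 *
 * Typical call site at an instruction boundary (simplified from iemExecOneInner
 * and the execution loops below):
 * @code
 *     if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
 *                                  | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
 *         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
 * @endcode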
9625 */
9626static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9627{
9628 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9629 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9630 {
9631 /* VMX preemption timer takes priority over NMI-window exits. */
9632 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9633 {
9634 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9635 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9636 }
9637 /*
9638 * Check remaining intercepts.
9639 *
9640 * NMI-window and Interrupt-window VM-exits.
9641 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9642 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9643 *
9644 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9645 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9646 */
9647 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9648 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9649 && !TRPMHasTrap(pVCpu))
9650 {
9651 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9652 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9653 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9654 {
9655 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9656 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9657 }
9658 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9659 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9660 {
9661 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9662 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9663 }
9664 }
9665 }
9666 /* TPR-below threshold/APIC write has the highest priority. */
9667 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9668 {
9669 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9670 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9671 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9672 }
9673 /* MTF takes priority over VMX-preemption timer. */
9674 else
9675 {
9676 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9677 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9678 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9679 }
9680 return rcStrict;
9681}
9682#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9683
9684
9685/** @def IEM_TRY_SETJMP
9686 * Wrapper around setjmp / try, hiding all the ugly differences.
9687 *
9688 * @note Use with extreme care as this is a fragile macro.
9689 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
9690 * @param a_rcTarget The variable that should receive the status code in case
9691 * of a longjmp/throw.
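 *
 * Typical usage pattern (mirroring iemExecOneInner below; the bodies here are
 * placeholders):
 * @code
 *     VBOXSTRICTRC rcStrict;
 *     IEM_TRY_SETJMP(pVCpu, rcStrict)
 *     {
 *         // code that may longjmp / throw
 *     }
 *     IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
 *     {
 *         // rcStrict now holds the status passed to the longjmp / throw
 *     }
 *     IEM_CATCH_LONGJMP_END(pVCpu);
 * @endcode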
9692 */
9693/** @def IEM_TRY_SETJMP_AGAIN
9694 * For when setjmp / try is used again in the same variable scope as a previous
9695 * IEM_TRY_SETJMP invocation.
9696 */
9697/** @def IEM_CATCH_LONGJMP_BEGIN
9698 * Start wrapper for catch / setjmp-else.
9699 *
9700 * This will set up a scope.
9701 *
9702 * @note Use with extreme care as this is a fragile macro.
9703 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
9704 * @param a_rcTarget The variable that should receive the status code in case
9705 * of a longjmp/throw.
9706 */
9707/** @def IEM_CATCH_LONGJMP_END
9708 * End wrapper for catch / setjmp-else.
9709 *
9710 * This will close the scope set up by IEM_CATCH_LONGJMP_BEGIN and clean up the
9711 * state.
9712 *
9713 * @note Use with extreme care as this is a fragile macro.
9714 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
9715 */
9716#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
9717# ifdef IEM_WITH_THROW_CATCH
9718# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
9719 a_rcTarget = VINF_SUCCESS; \
9720 try
9721# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
9722 IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
9723# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
9724 catch (int rcThrown) \
9725 { \
9726 a_rcTarget = rcThrown
9727# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
9728 } \
9729 ((void)0)
9730# else /* !IEM_WITH_THROW_CATCH */
9731# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
9732    jmp_buf JmpBuf; \
9733    jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
9734    (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
9735    if (((a_rcTarget) = setjmp(JmpBuf)) == 0)
9736# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
9737    pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
9738    (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
9739    if (((a_rcTarget) = setjmp(JmpBuf)) == 0)
9740# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
9741 else \
9742 { \
9743 ((void)0)
9744# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
9745 } \
9746 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
9747# endif /* !IEM_WITH_THROW_CATCH */
9748#endif /* IEM_WITH_SETJMP */
9749
9750
9751/**
9752 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9753 * IEMExecOneWithPrefetchedByPC.
9754 *
9755 * Similar code is found in IEMExecLots.
9756 *
9757 * @return Strict VBox status code.
9758 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9759 * @param fExecuteInhibit If set, execute the instruction following CLI,
9760 * POP SS and MOV SS,GR.
9761 * @param pszFunction The calling function name.
9762 */
9763DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9764{
9765 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9766 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9767 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9768 RT_NOREF_PV(pszFunction);
9769
9770#ifdef IEM_WITH_SETJMP
9771 VBOXSTRICTRC rcStrict;
9772 IEM_TRY_SETJMP(pVCpu, rcStrict)
9773 {
9774 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9775 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9776 }
9777 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9778 {
9779 pVCpu->iem.s.cLongJumps++;
9780 }
9781 IEM_CATCH_LONGJMP_END(pVCpu);
9782#else
9783 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9784 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9785#endif
9786 if (rcStrict == VINF_SUCCESS)
9787 pVCpu->iem.s.cInstructions++;
9788 if (pVCpu->iem.s.cActiveMappings > 0)
9789 {
9790 Assert(rcStrict != VINF_SUCCESS);
9791 iemMemRollback(pVCpu);
9792 }
9793 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9794 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9795 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9796
9797//#ifdef DEBUG
9798// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9799//#endif
9800
9801#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9802 /*
9803 * Perform any VMX nested-guest instruction boundary actions.
9804 *
9805 * If any of these causes a VM-exit, we must skip executing the next
9806 * instruction (would run into stale page tables). A VM-exit makes sure
 9807     * there is no interrupt-inhibition, so that should ensure we won't go on
 9808     * to try to execute the next instruction.  Clearing fExecuteInhibit is
9809 * problematic because of the setjmp/longjmp clobbering above.
9810 */
9811 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9812 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9813 || rcStrict != VINF_SUCCESS)
9814 { /* likely */ }
9815 else
9816 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9817#endif
9818
9819 /* Execute the next instruction as well if a cli, pop ss or
9820 mov ss, Gr has just completed successfully. */
9821 if ( fExecuteInhibit
9822 && rcStrict == VINF_SUCCESS
9823 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9824 {
9825 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9826 if (rcStrict == VINF_SUCCESS)
9827 {
9828#ifdef LOG_ENABLED
9829 iemLogCurInstr(pVCpu, false, pszFunction);
9830#endif
9831#ifdef IEM_WITH_SETJMP
9832 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9833 {
9834 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9835 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9836 }
9837 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9838 {
9839 pVCpu->iem.s.cLongJumps++;
9840 }
9841 IEM_CATCH_LONGJMP_END(pVCpu);
9842#else
9843 IEM_OPCODE_GET_NEXT_U8(&b);
9844 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9845#endif
9846 if (rcStrict == VINF_SUCCESS)
9847 {
9848 pVCpu->iem.s.cInstructions++;
9849#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9850 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9851 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9852 { /* likely */ }
9853 else
9854 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9855#endif
9856 }
9857 if (pVCpu->iem.s.cActiveMappings > 0)
9858 {
9859 Assert(rcStrict != VINF_SUCCESS);
9860 iemMemRollback(pVCpu);
9861 }
9862 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9863 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9864 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9865 }
9866 else if (pVCpu->iem.s.cActiveMappings > 0)
9867 iemMemRollback(pVCpu);
9868 /** @todo drop this after we bake this change into RIP advancing. */
9869 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9870 }
9871
9872 /*
9873 * Return value fiddling, statistics and sanity assertions.
9874 */
9875 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9876
9877 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9878 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9879 return rcStrict;
9880}
9881
9882
9883/**
9884 * Execute one instruction.
9885 *
9886 * @return Strict VBox status code.
9887 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
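 *
 * A minimal caller sketch (hypothetical; a real caller such as EM also has to
 * act on the specific strict status codes returned):
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict; // forward VINF_EM_* informational codes / errors
 * @endcode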
9888 */
9889VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9890{
 9891    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9892#ifdef LOG_ENABLED
9893 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9894#endif
9895
9896 /*
9897 * Do the decoding and emulation.
9898 */
9899 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9900 if (rcStrict == VINF_SUCCESS)
9901 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9902 else if (pVCpu->iem.s.cActiveMappings > 0)
9903 iemMemRollback(pVCpu);
9904
9905 if (rcStrict != VINF_SUCCESS)
9906 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9907 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9908 return rcStrict;
9909}
9910
9911
9912VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9913{
9914 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9915 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9916 if (rcStrict == VINF_SUCCESS)
9917 {
9918 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9919 if (pcbWritten)
9920 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9921 }
9922 else if (pVCpu->iem.s.cActiveMappings > 0)
9923 iemMemRollback(pVCpu);
9924
9925 return rcStrict;
9926}
9927
9928
9929VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9930 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9931{
9932 VBOXSTRICTRC rcStrict;
9933 if ( cbOpcodeBytes
9934 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9935 {
9936 iemInitDecoder(pVCpu, false, false);
9937#ifdef IEM_WITH_CODE_TLB
9938 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9939 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9940 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9941 pVCpu->iem.s.offCurInstrStart = 0;
9942 pVCpu->iem.s.offInstrNextByte = 0;
9943#else
9944 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9945 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9946#endif
9947 rcStrict = VINF_SUCCESS;
9948 }
9949 else
9950 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9951 if (rcStrict == VINF_SUCCESS)
9952 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9953 else if (pVCpu->iem.s.cActiveMappings > 0)
9954 iemMemRollback(pVCpu);
9955
9956 return rcStrict;
9957}
9958
9959
9960VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9961{
9962 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9963 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9964 if (rcStrict == VINF_SUCCESS)
9965 {
9966 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9967 if (pcbWritten)
9968 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9969 }
9970 else if (pVCpu->iem.s.cActiveMappings > 0)
9971 iemMemRollback(pVCpu);
9972
9973 return rcStrict;
9974}
9975
9976
9977VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9978 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9979{
9980 VBOXSTRICTRC rcStrict;
9981 if ( cbOpcodeBytes
9982 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9983 {
9984 iemInitDecoder(pVCpu, true, false);
9985#ifdef IEM_WITH_CODE_TLB
9986 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9987 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9988 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9989 pVCpu->iem.s.offCurInstrStart = 0;
9990 pVCpu->iem.s.offInstrNextByte = 0;
9991#else
9992 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9993 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9994#endif
9995 rcStrict = VINF_SUCCESS;
9996 }
9997 else
9998 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9999 if (rcStrict == VINF_SUCCESS)
10000 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10001 else if (pVCpu->iem.s.cActiveMappings > 0)
10002 iemMemRollback(pVCpu);
10003
10004 return rcStrict;
10005}
10006
10007
10008/**
10009 * For handling split cacheline lock operations when the host has split-lock
10010 * detection enabled.
10011 *
10012 * This will cause the interpreter to disregard the lock prefix and implicit
10013 * locking (xchg).
10014 *
10015 * @returns Strict VBox status code.
10016 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
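 *
 * A minimal caller sketch (hypothetical; typically used as a fallback when the
 * host flags a split-lock access during regular execution):
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
 * @endcode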
10017 */
10018VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10019{
10020 /*
10021 * Do the decoding and emulation.
10022 */
10023 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
10024 if (rcStrict == VINF_SUCCESS)
10025 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10026 else if (pVCpu->iem.s.cActiveMappings > 0)
10027 iemMemRollback(pVCpu);
10028
10029 if (rcStrict != VINF_SUCCESS)
10030 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10031 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10032 return rcStrict;
10033}
10034
10035
10036VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10037{
10038 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10039 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10040
10041 /*
10042 * See if there is an interrupt pending in TRPM, inject it if we can.
10043 */
10044 /** @todo What if we are injecting an exception and not an interrupt? Is that
10045 * possible here? For now we assert it is indeed only an interrupt. */
10046 if (!TRPMHasTrap(pVCpu))
10047 { /* likely */ }
10048 else
10049 {
10050 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10051 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10052 {
10053 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10054#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10055 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10056 if (fIntrEnabled)
10057 {
10058 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10059 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10060 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10061 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10062 else
10063 {
10064 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10065 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10066 }
10067 }
10068#else
10069 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10070#endif
10071 if (fIntrEnabled)
10072 {
10073 uint8_t u8TrapNo;
10074 TRPMEVENT enmType;
10075 uint32_t uErrCode;
10076 RTGCPTR uCr2;
10077 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10078 AssertRC(rc2);
10079 Assert(enmType == TRPM_HARDWARE_INT);
10080 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10081
10082 TRPMResetTrap(pVCpu);
10083
10084#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10085 /* Injecting an event may cause a VM-exit. */
10086 if ( rcStrict != VINF_SUCCESS
10087 && rcStrict != VINF_IEM_RAISED_XCPT)
10088 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10089#else
10090 NOREF(rcStrict);
10091#endif
10092 }
10093 }
10094 }
10095
10096 /*
10097 * Initial decoder init w/ prefetch, then setup setjmp.
10098 */
10099 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10100 if (rcStrict == VINF_SUCCESS)
10101 {
10102#ifdef IEM_WITH_SETJMP
10103 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10104 IEM_TRY_SETJMP(pVCpu, rcStrict)
10105#endif
10106 {
10107 /*
10108             * The run loop.  We limit ourselves to the caller-specified maximum number of instructions.
10109 */
10110 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10111 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10112 for (;;)
10113 {
10114 /*
10115 * Log the state.
10116 */
10117#ifdef LOG_ENABLED
10118 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10119#endif
10120
10121 /*
10122 * Do the decoding and emulation.
10123 */
10124 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10125 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10126#ifdef VBOX_STRICT
10127 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10128#endif
10129 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10130 {
10131 Assert(pVCpu->iem.s.cActiveMappings == 0);
10132 pVCpu->iem.s.cInstructions++;
10133
10134#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10135 /* Perform any VMX nested-guest instruction boundary actions. */
10136 uint64_t fCpu = pVCpu->fLocalForcedActions;
10137 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10138 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10139 { /* likely */ }
10140 else
10141 {
10142 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10143 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10144 fCpu = pVCpu->fLocalForcedActions;
10145 else
10146 {
10147 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10148 break;
10149 }
10150 }
10151#endif
10152 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10153 {
10154#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10155 uint64_t fCpu = pVCpu->fLocalForcedActions;
10156#endif
10157 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10158 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10159 | VMCPU_FF_TLB_FLUSH
10160 | VMCPU_FF_UNHALT );
10161
10162 if (RT_LIKELY( ( !fCpu
10163 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10164 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10165 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10166 {
10167 if (cMaxInstructionsGccStupidity-- > 0)
10168 {
10169                        /* Poll timers every now and then according to the caller's specs. */
10170 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10171 || !TMTimerPollBool(pVM, pVCpu))
10172 {
10173 Assert(pVCpu->iem.s.cActiveMappings == 0);
10174 iemReInitDecoder(pVCpu);
10175 continue;
10176 }
10177 }
10178 }
10179 }
10180 Assert(pVCpu->iem.s.cActiveMappings == 0);
10181 }
10182 else if (pVCpu->iem.s.cActiveMappings > 0)
10183 iemMemRollback(pVCpu);
10184 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10185 break;
10186 }
10187 }
10188#ifdef IEM_WITH_SETJMP
10189 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10190 {
10191 if (pVCpu->iem.s.cActiveMappings > 0)
10192 iemMemRollback(pVCpu);
10193# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10194 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10195# endif
10196 pVCpu->iem.s.cLongJumps++;
10197 }
10198 IEM_CATCH_LONGJMP_END(pVCpu);
10199#endif
10200
10201 /*
10202 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10203 */
10204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10206 }
10207 else
10208 {
10209 if (pVCpu->iem.s.cActiveMappings > 0)
10210 iemMemRollback(pVCpu);
10211
10212#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10213 /*
10214 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10215 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10216 */
10217 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10218#endif
10219 }
10220
10221 /*
10222 * Maybe re-enter raw-mode and log.
10223 */
10224 if (rcStrict != VINF_SUCCESS)
10225 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10226 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10227 if (pcInstructions)
10228 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10229 return rcStrict;
10230}
10231
10232
10233/**
10234 * Interface used by EMExecuteExec, does exit statistics and limits.
10235 *
10236 * @returns Strict VBox status code.
10237 * @param pVCpu The cross context virtual CPU structure.
10238 * @param fWillExit To be defined.
10239 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10240 * @param cMaxInstructions Maximum number of instructions to execute.
10241 * @param cMaxInstructionsWithoutExits
10242 * The max number of instructions without exits.
10243 * @param pStats Where to return statistics.
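 *
 * A minimal caller sketch (hypothetical limit values; EMExecuteExec is the real
 * consumer of this interface):
 * @code
 *     IEMEXECFOREXITSTATS Stats;
 *     // fWillExit=0, cMinInstructions=1, cMaxInstructions=4096, cMaxInstructionsWithoutExits=512
 *     VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 1, 4096, 512, &Stats);
 * @endcode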
10244 */
10245VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10246 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10247{
10248 NOREF(fWillExit); /** @todo define flexible exit crits */
10249
10250 /*
10251 * Initialize return stats.
10252 */
10253 pStats->cInstructions = 0;
10254 pStats->cExits = 0;
10255 pStats->cMaxExitDistance = 0;
10256 pStats->cReserved = 0;
10257
10258 /*
10259 * Initial decoder init w/ prefetch, then setup setjmp.
10260 */
10261 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10262 if (rcStrict == VINF_SUCCESS)
10263 {
10264#ifdef IEM_WITH_SETJMP
10265 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10266 IEM_TRY_SETJMP(pVCpu, rcStrict)
10267#endif
10268 {
10269#ifdef IN_RING0
10270 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10271#endif
10272 uint32_t cInstructionSinceLastExit = 0;
10273
10274 /*
10275             * The run loop.  We limit ourselves to the caller-specified instruction and exit limits.
10276 */
10277 PVM pVM = pVCpu->CTX_SUFF(pVM);
10278 for (;;)
10279 {
10280 /*
10281 * Log the state.
10282 */
10283#ifdef LOG_ENABLED
10284 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10285#endif
10286
10287 /*
10288 * Do the decoding and emulation.
10289 */
10290 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10291
10292 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10293 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10294
10295 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10296 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10297 {
10298 pStats->cExits += 1;
10299 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10300 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10301 cInstructionSinceLastExit = 0;
10302 }
10303
10304 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10305 {
10306 Assert(pVCpu->iem.s.cActiveMappings == 0);
10307 pVCpu->iem.s.cInstructions++;
10308 pStats->cInstructions++;
10309 cInstructionSinceLastExit++;
10310
10311#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10312 /* Perform any VMX nested-guest instruction boundary actions. */
10313 uint64_t fCpu = pVCpu->fLocalForcedActions;
10314 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10315 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10316 { /* likely */ }
10317 else
10318 {
10319 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10320 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10321 fCpu = pVCpu->fLocalForcedActions;
10322 else
10323 {
10324 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10325 break;
10326 }
10327 }
10328#endif
10329 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10330 {
10331#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10332 uint64_t fCpu = pVCpu->fLocalForcedActions;
10333#endif
10334 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10335 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10336 | VMCPU_FF_TLB_FLUSH
10337 | VMCPU_FF_UNHALT );
10338 if (RT_LIKELY( ( ( !fCpu
10339 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10340 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10341 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10342 || pStats->cInstructions < cMinInstructions))
10343 {
10344 if (pStats->cInstructions < cMaxInstructions)
10345 {
10346 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10347 {
10348#ifdef IN_RING0
10349 if ( !fCheckPreemptionPending
10350 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10351#endif
10352 {
10353 Assert(pVCpu->iem.s.cActiveMappings == 0);
10354 iemReInitDecoder(pVCpu);
10355 continue;
10356 }
10357#ifdef IN_RING0
10358 rcStrict = VINF_EM_RAW_INTERRUPT;
10359 break;
10360#endif
10361 }
10362 }
10363 }
10364 Assert(!(fCpu & VMCPU_FF_IEM));
10365 }
10366 Assert(pVCpu->iem.s.cActiveMappings == 0);
10367 }
10368 else if (pVCpu->iem.s.cActiveMappings > 0)
10369 iemMemRollback(pVCpu);
10370 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10371 break;
10372 }
10373 }
10374#ifdef IEM_WITH_SETJMP
10375 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10376 {
10377 if (pVCpu->iem.s.cActiveMappings > 0)
10378 iemMemRollback(pVCpu);
10379 pVCpu->iem.s.cLongJumps++;
10380 }
10381 IEM_CATCH_LONGJMP_END(pVCpu);
10382#endif
10383
10384 /*
10385 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10386 */
10387 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10389 }
10390 else
10391 {
10392 if (pVCpu->iem.s.cActiveMappings > 0)
10393 iemMemRollback(pVCpu);
10394
10395#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10396 /*
10397 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10398 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10399 */
10400 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10401#endif
10402 }
10403
10404 /*
10405 * Maybe re-enter raw-mode and log.
10406 */
10407 if (rcStrict != VINF_SUCCESS)
10408 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10409 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10410 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10411 return rcStrict;
10412}
10413
10414
10415/**
10416 * Injects a trap, fault, abort, software interrupt or external interrupt.
10417 *
10418 * The parameter list matches TRPMQueryTrapAll pretty closely.
10419 *
10420 * @returns Strict VBox status code.
10421 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10422 * @param u8TrapNo The trap number.
10423 * @param enmType What type is it (trap/fault/abort), software
10424 * interrupt or hardware interrupt.
10425 * @param uErrCode The error code if applicable.
10426 * @param uCr2 The CR2 value if applicable.
10427 * @param cbInstr The instruction length (only relevant for
10428 * software interrupts).
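 *
 * Typical usage together with TRPM (mirroring the hardware interrupt injection
 * done in IEMExecLots above):
 * @code
 *     uint8_t   u8TrapNo;
 *     TRPMEVENT enmType;
 *     uint32_t  uErrCode;
 *     RTGCPTR   uCr2;
 *     int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL, NULL); AssertRC(rc);
 *     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0); // cbInstr=0
 *     TRPMResetTrap(pVCpu);
 * @endcode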
10429 */
10430VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10431 uint8_t cbInstr)
10432{
10433 iemInitDecoder(pVCpu, false, false);
10434#ifdef DBGFTRACE_ENABLED
10435 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10436 u8TrapNo, enmType, uErrCode, uCr2);
10437#endif
10438
10439 uint32_t fFlags;
10440 switch (enmType)
10441 {
10442 case TRPM_HARDWARE_INT:
10443 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10444 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10445 uErrCode = uCr2 = 0;
10446 break;
10447
10448 case TRPM_SOFTWARE_INT:
10449 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10450 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10451 uErrCode = uCr2 = 0;
10452 break;
10453
10454 case TRPM_TRAP:
10455 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10456 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10457 if (u8TrapNo == X86_XCPT_PF)
10458 fFlags |= IEM_XCPT_FLAGS_CR2;
10459 switch (u8TrapNo)
10460 {
10461 case X86_XCPT_DF:
10462 case X86_XCPT_TS:
10463 case X86_XCPT_NP:
10464 case X86_XCPT_SS:
10465 case X86_XCPT_PF:
10466 case X86_XCPT_AC:
10467 case X86_XCPT_GP:
10468 fFlags |= IEM_XCPT_FLAGS_ERR;
10469 break;
10470 }
10471 break;
10472
10473 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10474 }
10475
10476 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10477
10478 if (pVCpu->iem.s.cActiveMappings > 0)
10479 iemMemRollback(pVCpu);
10480
10481 return rcStrict;
10482}
10483
10484
10485/**
10486 * Injects the active TRPM event.
10487 *
10488 * @returns Strict VBox status code.
10489 * @param pVCpu The cross context virtual CPU structure.
10490 */
10491VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10492{
10493#ifndef IEM_IMPLEMENTS_TASKSWITCH
10494 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10495#else
10496 uint8_t u8TrapNo;
10497 TRPMEVENT enmType;
10498 uint32_t uErrCode;
10499 RTGCUINTPTR uCr2;
10500 uint8_t cbInstr;
10501 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10502 if (RT_FAILURE(rc))
10503 return rc;
10504
10505 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10506 * ICEBP \#DB injection as a special case. */
10507 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10508#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10509 if (rcStrict == VINF_SVM_VMEXIT)
10510 rcStrict = VINF_SUCCESS;
10511#endif
10512#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10513 if (rcStrict == VINF_VMX_VMEXIT)
10514 rcStrict = VINF_SUCCESS;
10515#endif
10516 /** @todo Are there any other codes that imply the event was successfully
10517 * delivered to the guest? See @bugref{6607}. */
10518 if ( rcStrict == VINF_SUCCESS
10519 || rcStrict == VINF_IEM_RAISED_XCPT)
10520 TRPMResetTrap(pVCpu);
10521
10522 return rcStrict;
10523#endif
10524}
10525
10526
10527VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10528{
10529 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10530 return VERR_NOT_IMPLEMENTED;
10531}
10532
10533
10534VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10535{
10536 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10537 return VERR_NOT_IMPLEMENTED;
10538}
10539
10540
10541/**
10542 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10543 *
10544 * This API ASSUMES that the caller has already verified that the guest code is
10545 * allowed to access the I/O port. (The I/O port is in the DX register in the
10546 * guest state.)
10547 *
10548 * @returns Strict VBox status code.
10549 * @param pVCpu The cross context virtual CPU structure.
10550 * @param cbValue The size of the I/O port access (1, 2, or 4).
10551 * @param enmAddrMode The addressing mode.
10552 * @param fRepPrefix Indicates whether a repeat prefix is used
10553 * (doesn't matter which for this instruction).
10554 * @param cbInstr The instruction length in bytes.
10555 * @param iEffSeg The effective segment register number.
10556 * @param fIoChecked Whether the access to the I/O port has been
10557 * checked or not. It's typically checked in the
10558 * HM scenario.
10559 */
10560VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10561 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10562{
10563 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10564 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10565
10566 /*
10567 * State init.
10568 */
10569 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10570
10571 /*
10572 * Switch orgy for getting to the right handler.
10573 */
10574 VBOXSTRICTRC rcStrict;
10575 if (fRepPrefix)
10576 {
10577 switch (enmAddrMode)
10578 {
10579 case IEMMODE_16BIT:
10580 switch (cbValue)
10581 {
10582 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10583 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10584 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10585 default:
10586 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10587 }
10588 break;
10589
10590 case IEMMODE_32BIT:
10591 switch (cbValue)
10592 {
10593 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10594 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10595 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10596 default:
10597 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10598 }
10599 break;
10600
10601 case IEMMODE_64BIT:
10602 switch (cbValue)
10603 {
10604 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10605 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10606 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10607 default:
10608 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10609 }
10610 break;
10611
10612 default:
10613 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10614 }
10615 }
10616 else
10617 {
10618 switch (enmAddrMode)
10619 {
10620 case IEMMODE_16BIT:
10621 switch (cbValue)
10622 {
10623 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10624 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10625 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10626 default:
10627 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10628 }
10629 break;
10630
10631 case IEMMODE_32BIT:
10632 switch (cbValue)
10633 {
10634 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10635 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10636 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10637 default:
10638 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10639 }
10640 break;
10641
10642 case IEMMODE_64BIT:
10643 switch (cbValue)
10644 {
10645 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10646 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10647 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10648 default:
10649 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10650 }
10651 break;
10652
10653 default:
10654 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10655 }
10656 }
10657
10658 if (pVCpu->iem.s.cActiveMappings)
10659 iemMemRollback(pVCpu);
10660
10661 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10662}
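
/*
 * Illustrative sketch only (made-up parameter values): emulating a "rep outsb"
 * after an I/O intercept.  The arguments are byte-sized accesses (cbValue=1),
 * 32-bit addressing, a REP prefix, the instruction length from the intercept
 * information, DS as the effective segment, and fIoChecked=true because the
 * caller is assumed to have done the permission checks already.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT,
 *                                                   true, cbInstr, X86_SREG_DS,
 *                                                   true);
 */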
10663
10664
10665/**
10666 * Interface for HM and EM for executing string I/O IN (read) instructions.
10667 *
10668 * This API ASSUMES that the caller has already verified that the guest code is
10669 * allowed to access the I/O port. (The I/O port is in the DX register in the
10670 * guest state.)
10671 *
10672 * @returns Strict VBox status code.
10673 * @param pVCpu The cross context virtual CPU structure.
10674 * @param cbValue The size of the I/O port access (1, 2, or 4).
10675 * @param enmAddrMode The addressing mode.
10676 * @param fRepPrefix Indicates whether a repeat prefix is used
10677 * (doesn't matter which for this instruction).
10678 * @param cbInstr The instruction length in bytes.
10679 * @param fIoChecked Whether the access to the I/O port has been
10680 * checked or not. It's typically checked in the
10681 * HM scenario.
10682 */
10683VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10684 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10685{
10686 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10687
10688 /*
10689 * State init.
10690 */
10691 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10692
10693 /*
10694 * Switch orgy for getting to the right handler.
10695 */
10696 VBOXSTRICTRC rcStrict;
10697 if (fRepPrefix)
10698 {
10699 switch (enmAddrMode)
10700 {
10701 case IEMMODE_16BIT:
10702 switch (cbValue)
10703 {
10704 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10705 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10706 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10707 default:
10708 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10709 }
10710 break;
10711
10712 case IEMMODE_32BIT:
10713 switch (cbValue)
10714 {
10715 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10716 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10717 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10718 default:
10719 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10720 }
10721 break;
10722
10723 case IEMMODE_64BIT:
10724 switch (cbValue)
10725 {
10726 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10727 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10728 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10729 default:
10730 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10731 }
10732 break;
10733
10734 default:
10735 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10736 }
10737 }
10738 else
10739 {
10740 switch (enmAddrMode)
10741 {
10742 case IEMMODE_16BIT:
10743 switch (cbValue)
10744 {
10745 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10746 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10747 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10748 default:
10749 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10750 }
10751 break;
10752
10753 case IEMMODE_32BIT:
10754 switch (cbValue)
10755 {
10756 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10757 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10758 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10759 default:
10760 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10761 }
10762 break;
10763
10764 case IEMMODE_64BIT:
10765 switch (cbValue)
10766 {
10767 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10768 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10769 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10770 default:
10771 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10772 }
10773 break;
10774
10775 default:
10776 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10777 }
10778 }
10779
10780 if ( pVCpu->iem.s.cActiveMappings == 0
10781 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10782 { /* likely */ }
10783 else
10784 {
10785 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10786 iemMemRollback(pVCpu);
10787 }
10788 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10789}
10790
10791
10792/**
10793 * Interface for rawmode to execute an OUT (write) instruction.
10794 *
10795 * @returns Strict VBox status code.
10796 * @param pVCpu The cross context virtual CPU structure.
10797 * @param cbInstr The instruction length in bytes.
10798 * @param u16Port The port to write to.
10799 * @param fImm Whether the port is specified using an immediate operand or
10800 * using the implicit DX register.
10801 * @param cbReg The register size.
10802 *
10803 * @remarks In ring-0 not all of the state needs to be synced in.
10804 */
10805VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10806{
10807 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10808 Assert(cbReg <= 4 && cbReg != 3);
10809
10810 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10811 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10812 Assert(!pVCpu->iem.s.cActiveMappings);
10813 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10814}
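
/*
 * Illustrative sketch only (made-up values): emulating a byte-sized
 * "out dx, al".  fImm=false selects the DX form and cbReg=1 the byte access
 * size; u16Port is assumed to come from the intercept information.
 * IEMExecDecodedIn() below follows the same pattern for IN.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, cbInstr, u16Port,
 *                                                false, 1);
 */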
10815
10816
10817/**
10818 * Interface for rawmode to execute an IN (read) instruction.
10819 *
10820 * @returns Strict VBox status code.
10821 * @param pVCpu The cross context virtual CPU structure.
10822 * @param cbInstr The instruction length in bytes.
10823 * @param u16Port The port to read.
10824 * @param fImm Whether the port is specified using an immediate operand or
10825 * using the implicit DX register.
10826 * @param cbReg The register size.
10827 */
10828VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10829{
10830 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10831 Assert(cbReg <= 4 && cbReg != 3);
10832
10833 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10834 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10835 Assert(!pVCpu->iem.s.cActiveMappings);
10836 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10837}
10838
10839
10840/**
10841 * Interface for HM and EM to write to a CRx register.
10842 *
10843 * @returns Strict VBox status code.
10844 * @param pVCpu The cross context virtual CPU structure.
10845 * @param cbInstr The instruction length in bytes.
10846 * @param iCrReg The control register number (destination).
10847 * @param iGReg The general purpose register number (source).
10848 *
10849 * @remarks In ring-0 not all of the state needs to be synced in.
10850 */
10851VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10852{
10853 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10854 Assert(iCrReg < 16);
10855 Assert(iGReg < 16);
10856
10857 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10858 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10859 Assert(!pVCpu->iem.s.cActiveMappings);
10860 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10861}
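
/*
 * Illustrative sketch only: emulating "mov cr3, rax" after a CR access
 * intercept.  The value 3 selects CR3 as the destination and X86_GREG_xAX is
 * the general register index for RAX.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3,
 *                                                        X86_GREG_xAX);
 */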
10862
10863
10864/**
10865 * Interface for HM and EM to read from a CRx register.
10866 *
10867 * @returns Strict VBox status code.
10868 * @param pVCpu The cross context virtual CPU structure.
10869 * @param cbInstr The instruction length in bytes.
10870 * @param iGReg The general purpose register number (destination).
10871 * @param iCrReg The control register number (source).
10872 *
10873 * @remarks In ring-0 not all of the state needs to be synced in.
10874 */
10875VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10876{
10877 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10878 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10879 | CPUMCTX_EXTRN_APIC_TPR);
10880 Assert(iCrReg < 16);
10881 Assert(iGReg < 16);
10882
10883 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10884 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10885 Assert(!pVCpu->iem.s.cActiveMappings);
10886 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10887}
10888
10889
10890/**
10891 * Interface for HM and EM to write to a DRx register.
10892 *
10893 * @returns Strict VBox status code.
10894 * @param pVCpu The cross context virtual CPU structure.
10895 * @param cbInstr The instruction length in bytes.
10896 * @param iDrReg The debug register number (destination).
10897 * @param iGReg The general purpose register number (source).
10898 *
10899 * @remarks In ring-0 not all of the state needs to be synced in.
10900 */
10901VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10902{
10903 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10904 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
10905 Assert(iDrReg < 8);
10906 Assert(iGReg < 16);
10907
10908 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10909 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10910 Assert(!pVCpu->iem.s.cActiveMappings);
10911 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10912}
10913
10914
10915/**
10916 * Interface for HM and EM to read from a DRx register.
10917 *
10918 * @returns Strict VBox status code.
10919 * @param pVCpu The cross context virtual CPU structure.
10920 * @param cbInstr The instruction length in bytes.
10921 * @param iGReg The general purpose register number (destination).
10922 * @param iDrReg The debug register number (source).
10923 *
10924 * @remarks In ring-0 not all of the state needs to be synced in.
10925 */
10926VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10927{
10928 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10929 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
10930 Assert(iDrReg < 8);
10931 Assert(iGReg < 16);
10932
10933 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10934 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10935 Assert(!pVCpu->iem.s.cActiveMappings);
10936 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10937}
10938
10939
10940/**
10941 * Interface for HM and EM to clear the CR0[TS] bit.
10942 *
10943 * @returns Strict VBox status code.
10944 * @param pVCpu The cross context virtual CPU structure.
10945 * @param cbInstr The instruction length in bytes.
10946 *
10947 * @remarks In ring-0 not all of the state needs to be synced in.
10948 */
10949VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10950{
10951 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10952
10953 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10954 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10955 Assert(!pVCpu->iem.s.cActiveMappings);
10956 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10957}
10958
10959
10960/**
10961 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10962 *
10963 * @returns Strict VBox status code.
10964 * @param pVCpu The cross context virtual CPU structure.
10965 * @param cbInstr The instruction length in bytes.
10966 * @param uValue The value to load into CR0.
10967 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10968 * memory operand. Otherwise pass NIL_RTGCPTR.
10969 *
10970 * @remarks In ring-0 not all of the state needs to be synced in.
10971 */
10972VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10973{
10974 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10975
10976 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10977 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10978 Assert(!pVCpu->iem.s.cActiveMappings);
10979 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10980}
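
/*
 * Illustrative sketch only: the register form of LMSW has no memory operand,
 * so NIL_RTGCPTR is passed for GCPtrEffDst.  uNewMsw is a hypothetical 16-bit
 * value taken from the intercept information.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uNewMsw,
 *                                                 NIL_RTGCPTR);
 */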
10981
10982
10983/**
10984 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10985 *
10986 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10987 *
10988 * @returns Strict VBox status code.
10989 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10990 * @param cbInstr The instruction length in bytes.
10991 * @remarks In ring-0 not all of the state needs to be synced in.
10992 * @thread EMT(pVCpu)
10993 */
10994VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10995{
10996 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10997
10998 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10999 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11000 Assert(!pVCpu->iem.s.cActiveMappings);
11001 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11002}
11003
11004
11005/**
11006 * Interface for HM and EM to emulate the WBINVD instruction.
11007 *
11008 * @returns Strict VBox status code.
11009 * @param pVCpu The cross context virtual CPU structure.
11010 * @param cbInstr The instruction length in bytes.
11011 *
11012 * @remarks In ring-0 not all of the state needs to be synced in.
11013 */
11014VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11015{
11016 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11017
11018 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11019 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11020 Assert(!pVCpu->iem.s.cActiveMappings);
11021 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11022}
11023
11024
11025/**
11026 * Interface for HM and EM to emulate the INVD instruction.
11027 *
11028 * @returns Strict VBox status code.
11029 * @param pVCpu The cross context virtual CPU structure.
11030 * @param cbInstr The instruction length in bytes.
11031 *
11032 * @remarks In ring-0 not all of the state needs to be synced in.
11033 */
11034VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11035{
11036 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11037
11038 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11039 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11040 Assert(!pVCpu->iem.s.cActiveMappings);
11041 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11042}
11043
11044
11045/**
11046 * Interface for HM and EM to emulate the INVLPG instruction.
11047 *
11048 * @returns Strict VBox status code.
11049 * @retval VINF_PGM_SYNC_CR3
11050 *
11051 * @param pVCpu The cross context virtual CPU structure.
11052 * @param cbInstr The instruction length in bytes.
11053 * @param GCPtrPage The effective address of the page to invalidate.
11054 *
11055 * @remarks In ring-0 not all of the state needs to be synced in.
11056 */
11057VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11058{
11059 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11060
11061 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11062 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11063 Assert(!pVCpu->iem.s.cActiveMappings);
11064 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11065}
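
/*
 * Illustrative sketch only: emulating INVLPG after an intercept, with
 * GCPtrPage taken from the intercept information.  A VINF_PGM_SYNC_CR3 return
 * would typically be propagated so the shadow page tables get resynced.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
 */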
11066
11067
11068/**
11069 * Interface for HM and EM to emulate the INVPCID instruction.
11070 *
11071 * @returns Strict VBox status code.
11072 * @retval VINF_PGM_SYNC_CR3
11073 *
11074 * @param pVCpu The cross context virtual CPU structure.
11075 * @param cbInstr The instruction length in bytes.
11076 * @param iEffSeg The effective segment register.
11077 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11078 * @param uType The invalidation type.
11079 *
11080 * @remarks In ring-0 not all of the state needs to be synced in.
11081 */
11082VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11083 uint64_t uType)
11084{
11085 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11086
11087 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11088 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11089 Assert(!pVCpu->iem.s.cActiveMappings);
11090 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11091}
11092
11093
11094/**
11095 * Interface for HM and EM to emulate the CPUID instruction.
11096 *
11097 * @returns Strict VBox status code.
11098 *
11099 * @param pVCpu The cross context virtual CPU structure.
11100 * @param cbInstr The instruction length in bytes.
11101 *
11102 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
11103 */
11104VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11105{
11106 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11107 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11108
11109 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11110 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11111 Assert(!pVCpu->iem.s.cActiveMappings);
11112 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11113}
11114
11115
11116/**
11117 * Interface for HM and EM to emulate the RDPMC instruction.
11118 *
11119 * @returns Strict VBox status code.
11120 *
11121 * @param pVCpu The cross context virtual CPU structure.
11122 * @param cbInstr The instruction length in bytes.
11123 *
11124 * @remarks Not all of the state needs to be synced in.
11125 */
11126VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11127{
11128 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11129 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11130
11131 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11132 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11133 Assert(!pVCpu->iem.s.cActiveMappings);
11134 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11135}
11136
11137
11138/**
11139 * Interface for HM and EM to emulate the RDTSC instruction.
11140 *
11141 * @returns Strict VBox status code.
11142 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11143 *
11144 * @param pVCpu The cross context virtual CPU structure.
11145 * @param cbInstr The instruction length in bytes.
11146 *
11147 * @remarks Not all of the state needs to be synced in.
11148 */
11149VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11150{
11151 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11152 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11153
11154 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11155 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11156 Assert(!pVCpu->iem.s.cActiveMappings);
11157 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11158}
11159
11160
11161/**
11162 * Interface for HM and EM to emulate the RDTSCP instruction.
11163 *
11164 * @returns Strict VBox status code.
11165 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11166 *
11167 * @param pVCpu The cross context virtual CPU structure.
11168 * @param cbInstr The instruction length in bytes.
11169 *
11170 * @remarks Not all of the state needs to be synced in. It is recommended
11171 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11172 */
11173VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11174{
11175 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11176 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11177
11178 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11179 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11180 Assert(!pVCpu->iem.s.cActiveMappings);
11181 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11182}
11183
11184
11185/**
11186 * Interface for HM and EM to emulate the RDMSR instruction.
11187 *
11188 * @returns Strict VBox status code.
11189 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11190 *
11191 * @param pVCpu The cross context virtual CPU structure.
11192 * @param cbInstr The instruction length in bytes.
11193 *
11194 * @remarks Not all of the state needs to be synced in. Requires RCX and
11195 * (currently) all MSRs.
11196 */
11197VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11198{
11199 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11200 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11201
11202 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11203 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11204 Assert(!pVCpu->iem.s.cActiveMappings);
11205 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11206}
11207
11208
11209/**
11210 * Interface for HM and EM to emulate the WRMSR instruction.
11211 *
11212 * @returns Strict VBox status code.
11213 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11214 *
11215 * @param pVCpu The cross context virtual CPU structure.
11216 * @param cbInstr The instruction length in bytes.
11217 *
11218 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11219 * and (currently) all MSRs.
11220 */
11221VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11222{
11223 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11224 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11225 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11226
11227 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11228 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11229 Assert(!pVCpu->iem.s.cActiveMappings);
11230 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11231}
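
/*
 * Illustrative sketch only (hypothetical intercept handler): WRMSR takes its
 * operands from the already imported RCX, RAX and RDX, so only pVCpu and the
 * instruction length are passed.  A caller wanting to continue guest
 * execution would typically convert VINF_IEM_RAISED_XCPT back to VINF_SUCCESS
 * once the exception has been queued for the guest.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, cbInstr);
 *      if (rcStrict == VINF_IEM_RAISED_XCPT)
 *          rcStrict = VINF_SUCCESS;
 */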
11232
11233
11234/**
11235 * Interface for HM and EM to emulate the MONITOR instruction.
11236 *
11237 * @returns Strict VBox status code.
11238 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11239 *
11240 * @param pVCpu The cross context virtual CPU structure.
11241 * @param cbInstr The instruction length in bytes.
11242 *
11243 * @remarks Not all of the state needs to be synced in.
11244 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11245 * are used.
11246 */
11247VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11248{
11249 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11250 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11251
11252 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11253 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11254 Assert(!pVCpu->iem.s.cActiveMappings);
11255 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11256}
11257
11258
11259/**
11260 * Interface for HM and EM to emulate the MWAIT instruction.
11261 *
11262 * @returns Strict VBox status code.
11263 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11264 *
11265 * @param pVCpu The cross context virtual CPU structure.
11266 * @param cbInstr The instruction length in bytes.
11267 *
11268 * @remarks Not all of the state needs to be synced in.
11269 */
11270VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11271{
11272 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11273 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11274
11275 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11276 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11277 Assert(!pVCpu->iem.s.cActiveMappings);
11278 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11279}
11280
11281
11282/**
11283 * Interface for HM and EM to emulate the HLT instruction.
11284 *
11285 * @returns Strict VBox status code.
11286 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11287 *
11288 * @param pVCpu The cross context virtual CPU structure.
11289 * @param cbInstr The instruction length in bytes.
11290 *
11291 * @remarks Not all of the state needs to be synced in.
11292 */
11293VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11294{
11295 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11296
11297 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11298 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11299 Assert(!pVCpu->iem.s.cActiveMappings);
11300 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11301}
11302
11303
11304/**
11305 * Checks if IEM is in the process of delivering an event (interrupt or
11306 * exception).
11307 *
11308 * @returns true if we're in the process of raising an interrupt or exception,
11309 * false otherwise.
11310 * @param pVCpu The cross context virtual CPU structure.
11311 * @param puVector Where to store the vector associated with the
11312 * currently delivered event, optional.
11313 * @param pfFlags Where to store the event delivery flags (see
11314 * IEM_XCPT_FLAGS_XXX), optional.
11315 * @param puErr Where to store the error code associated with the
11316 * event, optional.
11317 * @param puCr2 Where to store the CR2 associated with the event,
11318 * optional.
11319 * @remarks The caller should check the flags to determine if the error code and
11320 * CR2 are valid for the event.
11321 */
11322VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11323{
11324 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11325 if (fRaisingXcpt)
11326 {
11327 if (puVector)
11328 *puVector = pVCpu->iem.s.uCurXcpt;
11329 if (pfFlags)
11330 *pfFlags = pVCpu->iem.s.fCurXcpt;
11331 if (puErr)
11332 *puErr = pVCpu->iem.s.uCurXcptErr;
11333 if (puCr2)
11334 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11335 }
11336 return fRaisingXcpt;
11337}
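
/*
 * Illustrative sketch only: querying the event currently being delivered and
 * checking the IEM_XCPT_FLAGS_XXX bits before trusting the error code or CR2,
 * as the remark above requires.
 *
 *      uint8_t  uVector;
 *      uint32_t fFlags, uErr;
 *      uint64_t uCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *      {
 *          bool const fErrValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *          bool const fCr2Valid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *          Log(("xcpt %#x err=%#x (%d) cr2=%RX64 (%d)\n",
 *               uVector, uErr, fErrValid, uCr2, fCr2Valid));
 *      }
 */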
11338
11339#ifdef IN_RING3
11340
11341/**
11342 * Handles the unlikely and probably fatal merge cases.
11343 *
11344 * @returns Merged status code.
11345 * @param rcStrict Current EM status code.
11346 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11347 * with @a rcStrict.
11348 * @param iMemMap The memory mapping index. For error reporting only.
11349 * @param pVCpu The cross context virtual CPU structure of the calling
11350 * thread, for error reporting only.
11351 */
11352DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11353 unsigned iMemMap, PVMCPUCC pVCpu)
11354{
11355 if (RT_FAILURE_NP(rcStrict))
11356 return rcStrict;
11357
11358 if (RT_FAILURE_NP(rcStrictCommit))
11359 return rcStrictCommit;
11360
11361 if (rcStrict == rcStrictCommit)
11362 return rcStrictCommit;
11363
11364 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11365 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11366 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11367 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11368 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11369 return VERR_IOM_FF_STATUS_IPE;
11370}
11371
11372
11373/**
11374 * Helper for IOMR3ProcessForceFlag.
11375 *
11376 * @returns Merged status code.
11377 * @param rcStrict Current EM status code.
11378 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11379 * with @a rcStrict.
11380 * @param iMemMap The memory mapping index. For error reporting only.
11381 * @param pVCpu The cross context virtual CPU structure of the calling
11382 * thread, for error reporting only.
11383 */
11384DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11385{
11386 /* Simple. */
11387 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11388 return rcStrictCommit;
11389
11390 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11391 return rcStrict;
11392
11393 /* EM scheduling status codes. */
11394 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11395 && rcStrict <= VINF_EM_LAST))
11396 {
11397 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11398 && rcStrictCommit <= VINF_EM_LAST))
11399 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11400 }
11401
11402 /* Unlikely */
11403 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11404}
11405
11406
11407/**
11408 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11409 *
11410 * @returns Merge between @a rcStrict and what the commit operation returned.
11411 * @param pVM The cross context VM structure.
11412 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11413 * @param rcStrict The status code returned by ring-0 or raw-mode.
11414 */
11415VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11416{
11417 /*
11418 * Reset the pending commit.
11419 */
11420 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11421 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11422 ("%#x %#x %#x\n",
11423 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11424 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11425
11426 /*
11427 * Commit the pending bounce buffers (usually just one).
11428 */
11429 unsigned cBufs = 0;
11430 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11431 while (iMemMap-- > 0)
11432 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11433 {
11434 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11435 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11436 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11437
11438 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11439 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11440 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11441
11442 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11443 {
11444 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11445 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11446 pbBuf,
11447 cbFirst,
11448 PGMACCESSORIGIN_IEM);
11449 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11450 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11451 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11452 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11453 }
11454
11455 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11456 {
11457 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11459 pbBuf + cbFirst,
11460 cbSecond,
11461 PGMACCESSORIGIN_IEM);
11462 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11463 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11464 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11465 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11466 }
11467 cBufs++;
11468 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11469 }
11470
11471 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11472 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11473 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11474 pVCpu->iem.s.cActiveMappings = 0;
11475 return rcStrict;
11476}
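
/*
 * Illustrative sketch only (hypothetical ring-3 force-flag handling, not
 * copied from EM): when VMCPU_FF_IEM is set after returning to ring-3, the
 * pending bounce-buffer writes are committed and the status codes merged as
 * implemented above.
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */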
11477
11478#endif /* IN_RING3 */
11479